Dataset schema: each row pairs a code snippet with a style-context snippet and a binary label.

| column                  | dtype  | values            |
|-------------------------|--------|-------------------|
| code                    | string | 86 to 54.5k chars |
| code_codestyle          | int64  | 0 to 371          |
| style_context           | string | 87 to 49.2k chars |
| style_context_codestyle | int64  | 0 to 349          |
| label                   | int64  | 0 or 1            |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [False] * len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = [-1] * len(lowerCAmelCase_ )
def dfs(lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = c
for u in graph[v]:
if not visited[u]:
dfs(lowerCAmelCase_ , 1 - c )
for i in range(len(lowerCAmelCase_ ) ):
if not visited[i]:
dfs(lowerCAmelCase_ , 0 )
for i in range(len(lowerCAmelCase_ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
a__ : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
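# A quick sanity check (illustrative addition, not part of the original snippet):
# an odd cycle can never be 2-colored, while an even cycle always can.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
print(check_bipartite_dfs(triangle))  # False: a 3-cycle is not bipartite
print(check_bipartite_dfs(square))    # True: a 4-cycle alternates colors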
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
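# Illustrative note (assuming standard transformers lazy-import behavior): with
# the structure above, importing the package stays cheap because _LazyModule
# only executes a submodule such as modeling_luke on first attribute access:
#
#     from transformers import LukeModel  # triggers the torch-backed import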
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for ``symbol`` from Yahoo Finance India."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
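# Illustrative usage (assuming standard PretrainedConfig behavior): arguments
# that are not passed fall back to the BERT-base defaults declared above.
config = BertConfig(num_hidden_layers=6)
print(config.num_hidden_layers, config.hidden_size)  # 6 768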
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=1)
    return metric.compute(predictions=predictions, references=labels)


# Trainer callback that re-evaluates on the training set after each evaluation,
# so train and validation accuracy can be compared epoch by epoch.
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_evaluate(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset, input the target column here.
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Slide a look_back-sized window over the series; each window predicts the
    # next forward_days values.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
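    # Shape sanity check (illustrative addition, not part of the original script):
    # the same windowing applied to a toy series yields one 10-step input window
    # per 5-step target horizon.
    demo = np.arange(20, dtype=float).reshape(-1, 1)
    demo_x, demo_y = [], []
    for i in range(0, len(demo) - forward_days - look_back + 1):
        demo_x.append(demo[i : i + look_back])
        demo_y.append(demo[i + look_back : i + look_back + forward_days])
    print(np.array(demo_x).shape)  # (6, 10, 1)
    print(np.array(demo_y).shape)  # (6, 5, 1)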
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_A : Optional[int] ={
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate each utterance followed by EOS, truncating from the left
        so the result fits the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as a coefficient sequence (lowest degree
    first), by summing c_i * x**i directly."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, which needs only one
    multiplication and one addition per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for Mersenne numbers M_p = 2**p - 1.

    Starting from s = 4, iterate s = (s * s - 2) mod M_p exactly p - 2 times;
    M_p is prime if and only if the final s is 0. Only valid for odd prime p.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
def _lowerCAmelCase ( self ):
A : Any = ModelForTest()
A : Tuple = torch.randn(2, 3 )
A : Optional[int] = test_model(x + 1 )
A : List[str] = test_model(x + 2 )
A : List[str] = PreForwardHook()
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A : Optional[Any] = PreForwardHook()
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : Tuple = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A : Any = SequentialHook(PreForwardHook(), PreForwardHook() )
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : Optional[Any] = test_model(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-5 )
def _lowerCAmelCase ( self ):
A : Tuple = ModelForTest()
A : Any = torch.randn(2, 3 )
A : Any = test_model(lowerCamelCase__ )
A : List[Any] = PostForwardHook()
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : str = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__, output + 1, atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A : Tuple = PostForwardHook()
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : Optional[int] = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__, output + 1, atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A : Dict = SequentialHook(PostForwardHook(), PostForwardHook() )
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = test_model(lowerCamelCase__ )
assert torch.allclose(lowerCamelCase__, output + 2, atol=1e-5 )
def _lowerCAmelCase ( self ):
A : List[Any] = ModelForTest()
A : Tuple = torch.randn(2, 3 )
A : Union[str, Any] = test_model(lowerCamelCase__ )
A : List[Any] = PostForwardHook()
add_hook_to_module(lowerCamelCase__, lowerCamelCase__ )
A : List[Any] = test_model(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__, output + 1 ) )
self.assertTrue(outputa.requires_grad )
A : int = True
A : Tuple = test_model(lowerCamelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowerCAmelCase ( self ):
A : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) )
self.assertEqual(model.lineara.weight.device, torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A : str = torch.randn(2, 3 )
A : int = model(lowerCamelCase__ )
self.assertEqual(output.device, torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCamelCase__, AlignDevicesHook(io_same_device=lowerCamelCase__ ) )
A : int = torch.randn(2, 3 ).to(0 )
A : str = model(lowerCamelCase__ )
self.assertEqual(output.device, torch.device(0 ) )
def _lowerCAmelCase ( self ):
A : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
A : int = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCamelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device, lowerCamelCase__ )
A : int = torch.randn(2, 3 )
A : List[Any] = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
A : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.batchnorm, AlignDevicesHook(**lowerCamelCase__ ) )
add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCamelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
A : int = torch.randn(2, 3 )
A : str = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def _lowerCAmelCase ( self ):
A : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
A : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(lowerCamelCase__, execution_device=lowerCamelCase__, offload=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A : Optional[int] = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device, lowerCamelCase__ )
A : List[str] = torch.randn(2, 3 )
A : Optional[int] = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCamelCase__, execution_device=lowerCamelCase__, offload=lowerCamelCase__, offload_buffers=lowerCamelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
A : List[str] = torch.randn(2, 3 )
A : List[str] = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
def _lowerCAmelCase ( self ):
A : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# This will move each submodule on different devices
A : List[str] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
lowerCamelCase__, execution_device=lowerCamelCase__, offload=lowerCamelCase__, weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A : Any = torch.device(lowerCamelCase__ )
self.assertEqual(model.batchnorm.running_mean.device, lowerCamelCase__ )
A : Optional[Any] = torch.randn(2, 3 )
A : Tuple = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCamelCase__, execution_device=lowerCamelCase__, offload=lowerCamelCase__, weights_map=model.state_dict(), offload_buffers=lowerCamelCase__, )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device, torch.device("""meta""" ) )
A : List[Any] = torch.randn(2, 3 )
A : Dict = model(lowerCamelCase__ )
self.assertEqual(output.device, lowerCamelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCamelCase__ )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device, torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device, torch.device("""cpu""" ) )
| 115 | 1 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Explore the state space tree with DFS, pruning a branch as soon as its
    partial sum exceeds max_sum or the remaining numbers can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to unicode strings, skipping the
    whitespace/control characters that byte-level BPE cannot represent directly."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
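# Illustrative check (assuming GPT-2-style byte-level BPE): every byte maps to a
# printable character; non-printable bytes are shifted above 255, so the space
# byte (32) becomes "Ġ", the marker seen in BPE vocabulary files.
byte_encoder = bytes_to_unicode()
print(byte_encoder[32])   # Ġ
print(len(byte_encoder))  # 256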
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
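# Illustrative usage: the returned set is unordered.
print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}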
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding='''utf-8''' ) as vocab_handle:
__lowerCAmelCase = json.load(__lowercase )
__lowerCAmelCase = {v: k for k, v in self.encoder.items()}
__lowerCAmelCase = errors # how to handle errors in decoding
__lowerCAmelCase = bytes_to_unicode()
__lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding='''utf-8''' ) as merges_handle:
__lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = {}
__lowerCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCAmelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case (self ):
return len(self.encoder )
def _snake_case (self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case (self , __lowercase ):
if token in self.cache:
return self.cache[token]
__lowerCAmelCase = tuple(__lowercase )
__lowerCAmelCase = get_pairs(__lowercase )
if not pairs:
return token
while True:
__lowerCAmelCase = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase = bigram
__lowerCAmelCase = []
__lowerCAmelCase = 0
while i < len(__lowercase ):
try:
__lowerCAmelCase = word.index(__lowercase , __lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCAmelCase = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase = tuple(__lowercase )
__lowerCAmelCase = new_word
if len(__lowercase ) == 1:
break
else:
__lowerCAmelCase = get_pairs(__lowercase )
__lowerCAmelCase = ''' '''.join(__lowercase )
__lowerCAmelCase = word
return word
def _snake_case (self , __lowercase ):
__lowerCAmelCase = []
for token in re.findall(self.pat , __lowercase ):
__lowerCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(''' ''' ) )
return bpe_tokens
def _snake_case (self , __lowercase ):
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def _snake_case (self , __lowercase ):
return self.decoder.get(__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = ''''''.join(__lowercase )
__lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case (self , __lowercase , __lowercase = None ):
if not os.path.isdir(__lowercase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' )
__lowerCAmelCase = 0
with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__lowerCAmelCase = token_index
writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case (self , __lowercase , __lowercase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
def _snake_case (self , __lowercase , __lowercase = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case (self , __lowercase , __lowercase=False , **__lowercase ):
__lowerCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
__lowerCAmelCase = ''' ''' + text
return (text, kwargs)
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = PaddingStrategy.DO_NOT_PAD , __lowercase = None , __lowercase = None , ):
__lowerCAmelCase = super()._pad(
encoded_inputs=__lowercase , max_length=__lowercase , padding_strategy=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , )
# Load from model defaults
if return_attention_mask is None:
__lowerCAmelCase = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCAmelCase = len(encoded_inputs['''global_attention_mask'''] ) != len(__lowercase )
if needs_to_be_padded:
__lowerCAmelCase = len(__lowercase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCAmelCase = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCAmelCase = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 174 |
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
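
# Sanity check (a sketch, assuming f(x) = x**2 as defined above): the exact
# integral over [0, 1] is 1/3, and the composite trapezoidal error bound
# (b - a) * h**2 * max|f''| / 12 = 1 * 0.01 * 2 / 12 ≈ 0.0017, so main()
# should print a value close to 0.335.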
| 174 | 1 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase = '''true'''
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=82 , SCREAMING_SNAKE_CASE_ : List[Any]=16 ) -> Union[str, Any]:
set_seed(42 )
SCREAMING_SNAKE_CASE = RegressionModel()
SCREAMING_SNAKE_CASE = deepcopy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = RegressionDataset(length=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
model.to(accelerator.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model, ddp_model, dataloader
def lowercase (SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
SCREAMING_SNAKE_CASE = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(SCREAMING_SNAKE_CASE_ : List[str] ):
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ : Optional[int] ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=16 )
def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
SCREAMING_SNAKE_CASE = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = get_dataloader(SCREAMING_SNAKE_CASE_ , not dispatch_batches )
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowercase (SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
SCREAMING_SNAKE_CASE = []
for batch in dataloader:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch.values()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE_ )
targs.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.cat(SCREAMING_SNAKE_CASE_ ), torch.cat(SCREAMING_SNAKE_CASE_ )
return logits, targs
def lowercase (SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : Optional[Any]=82 , SCREAMING_SNAKE_CASE_ : int=False , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=16 ) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_basic_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert (
len(SCREAMING_SNAKE_CASE_ ) == num_samples
), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE_ )}'
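
# Note: `gather_for_metrics` trims the duplicate samples that distributed
# samplers pad onto the final batch, which is why the gathered tensors above
# match the raw dataset length exactly.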
def lowercase (SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False ) -> Optional[int]:
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_mrpc_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# First do baseline
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['no']
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE_ )
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=batch['labels'] )
SCREAMING_SNAKE_CASE = metric.compute()
# Then do distributed
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE = batch['labels']
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def lowercase () -> Dict:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
SCREAMING_SNAKE_CASE = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
SCREAMING_SNAKE_CASE = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 5_12 )
accelerator.state._reset_state()
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 359 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
test_set = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
# NOTE: the generators rescale inputs by 1/255, so the single image should be
# rescaled the same way before prediction.
test_image = test_image / 255.0
result = classifier.predict(test_image)
# training_set.class_indices
# The sigmoid output is a probability, so threshold at 0.5 instead of
# comparing against exact 0 or 1.
if result[0][0] >= 0.5:
    prediction = '''Abnormality detected'''
else:
    prediction = '''Normal'''
| 38 | 0 |
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
| 92 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    model_type = """decision_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
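
# Hypothetical usage (names as defined above): `attribute_map` aliases the
# GPT-2-style names, so for example:
# config = DecisionTransformerConfig(state_dim=17, act_dim=4)
# config.max_position_embeddings  # -> 1024, resolved through "n_positions"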
| 92 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
"""simple docstring"""
snake_case__ : str = AltDiffusionPipeline
snake_case__ : Dict = TEXT_TO_IMAGE_PARAMS
snake_case__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : int ) -> int:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(lowercase__ )
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__SCREAMING_SNAKE_CASE = 7_7
__SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int]=0 ) -> int:
if str(lowercase__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowercase__ )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowercase__ )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowercase__ )
__SCREAMING_SNAKE_CASE = alt_pipe.to(lowercase__ )
alt_pipe.set_progress_bar_config(disable=lowercase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase__ )
__SCREAMING_SNAKE_CASE = "A photo of an astronaut"
__SCREAMING_SNAKE_CASE = alt_pipe(**lowercase__ )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowercase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowercase__ )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowercase__ )
__SCREAMING_SNAKE_CASE = alt_pipe.to(lowercase__ )
alt_pipe.set_progress_bar_config(disable=lowercase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowercase__ )
__SCREAMING_SNAKE_CASE = alt_pipe(**lowercase__ )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Any ) -> str:
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=lowercase__ )
__SCREAMING_SNAKE_CASE = alt_pipe.to(lowercase__ )
alt_pipe.set_progress_bar_config(disable=lowercase__ )
__SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=lowercase__ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=lowercase__ , safety_checker=lowercase__ )
__SCREAMING_SNAKE_CASE = alt_pipe.to(lowercase__ )
alt_pipe.set_progress_bar_config(disable=lowercase__ )
__SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=lowercase__ , num_inference_steps=2 , output_type="numpy" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 358 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 195 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Toy tool used by the interpreter tests below."""
    return x + 2
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Any ) -> Union[str, Any]:
UpperCAmelCase__ = "x = 3"
UpperCAmelCase__ = {}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3} )
UpperCAmelCase__ = "x = y"
UpperCAmelCase__ = {"y": 5}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 5, "y": 5} )
def UpperCAmelCase_ ( self :Optional[Any] ) -> int:
UpperCAmelCase__ = "y = add_two(x)"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase_ ( self :Dict ) -> Optional[Any]:
UpperCAmelCase__ = "x = 3"
UpperCAmelCase__ = {}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3} )
def UpperCAmelCase_ ( self :List[str] ) -> Optional[int]:
UpperCAmelCase__ = "test_dict = {'x': x, 'y': add_two(x)}"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def UpperCAmelCase_ ( self :List[str] ) -> Union[str, Any]:
UpperCAmelCase__ = "x = 3\ny = 5"
UpperCAmelCase__ = {}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 5} )
def UpperCAmelCase_ ( self :Tuple ) -> Union[str, Any]:
UpperCAmelCase__ = "text = f'This is x: {x}.'"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowerCamelCase , {"x": 3, "text": "This is x: 3."} )
def UpperCAmelCase_ ( self :List[str] ) -> str:
UpperCAmelCase__ = "if x <= 3:\n y = 2\nelse:\n y = 5"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 2} )
UpperCAmelCase__ = {"x": 8}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 8, "y": 5} )
def UpperCAmelCase_ ( self :Tuple ) -> str:
UpperCAmelCase__ = "test_list = [x, add_two(x)]"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
self.assertListEqual(lowerCamelCase , [3, 5] )
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
def UpperCAmelCase_ ( self :List[str] ) -> Any:
UpperCAmelCase__ = "y = x"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {} , state=lowerCamelCase )
assert result == 3
self.assertDictEqual(lowerCamelCase , {"x": 3, "y": 3} )
def UpperCAmelCase_ ( self :str ) -> Optional[int]:
UpperCAmelCase__ = "test_list = [x, add_two(x)]\ntest_list[1]"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_list": [3, 5]} )
UpperCAmelCase__ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
UpperCAmelCase__ = {"x": 3}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"add_two": add_two} , state=lowerCamelCase )
assert result == 5
self.assertDictEqual(lowerCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def UpperCAmelCase_ ( self :Tuple ) -> int:
UpperCAmelCase__ = "x = 0\nfor i in range(3):\n x = i"
UpperCAmelCase__ = {}
UpperCAmelCase__ = evaluate(lowerCamelCase , {"range": range} , state=lowerCamelCase )
assert result == 2
self.assertDictEqual(lowerCamelCase , {"x": 2, "i": 2} )
| 169 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig( PretrainedConfig ):
    model_type = """markuplm"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 169 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
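
# Note: the constructor re-sorts its input, so the merge above costs
# O((n + m) log(n + m)). Since both inputs already iterate in sorted order,
# a hypothetical linear-time variant could interleave them lazily instead:
# import heapq
# merged_ints = list(heapq.merge(sll_one, sll_two))  # O(n + m)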
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 207 |
UNIT_SYMBOL = {
    'meter': 'm',
    'kilometer': 'km',
    'megametre': 'Mm',
    'gigametre': 'Gm',
    'terametre': 'Tm',
    'petametre': 'Pm',
    'exametre': 'Em',
    'zettametre': 'Zm',
    'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    'm': 0,
    'km': 3,
    'Mm': 6,
    'Gm': 9,
    'Tm': 12,
    'Pm': 15,
    'Em': 18,
    'Zm': 21,
    'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
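
# Minimal usage sketch (assuming the helpers above):
# length_conversion(4, "kilometer", "meter")  # -> 4000.0, i.e. 4 * 10**(3 - 0)
# length_conversion(1, "meter", "km")         # -> 0.001,  i.e. 1 * 10**(0 - 3)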
if __name__ == "__main__":
from doctest import testmod
testmod()
| 207 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ], )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
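
# Hypothetical call site (a sketch): building the extension requires a CUDA
# toolchain, so guard the call instead of failing at import time.
# try:
#     MSDA = load_cuda_kernels()
# except Exception:
#     MSDA = None  # fall back to a pure-PyTorch implementation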
| 59 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ''''''
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main():
    message = input('''Enter message to encode or decode: ''').strip()
    key = input('''Enter keyword: ''').strip()
    option = input('''Encipher or decipher? E/D:''').strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
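
# Round-trip sketch (assuming the helpers above): the cipher map is a
# bijection on A-Z and non-letters pass through unchanged, so deciphering an
# enciphered message recovers the uppercased original.
# cipher_map = create_cipher_map("Diffie")
# assert decipher(encipher("Hello World!!", cipher_map), cipher_map) == "HELLO WORLD!!"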
| 131 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
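
# Hypothetical usage (class and method names as defined above):
# from transformers import BertConfig
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert config.decoder.is_decoder is True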
| 238 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : int , *a__ : List[Any] , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(a__ )
def __call__(self : Optional[Any] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : List[str] ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
def a (self : int , **a__ : int ):
"""simple docstring"""
return {}, {}, {}
def a (self : Optional[int] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = load_image(a__ )
__snake_case = image.size
__snake_case = self.image_processor(images=a__ , return_tensors=self.framework )
return model_inputs
def a (self : List[Any] , a__ : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model(**a__ )
return model_outputs
def a (self : int , a__ : str ):
"""simple docstring"""
__snake_case = model_outputs.predicted_depth
__snake_case = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ )
__snake_case = prediction.squeeze().cpu().numpy()
__snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' )
__snake_case = Image.fromarray(a__ )
__snake_case = {}
__snake_case = predicted_depth
__snake_case = depth
return output_dict
| 238 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase_ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Optional[Any] = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _A ( ):
"""simple docstring"""
a =(
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
a =bs[:]
a =0
for b in range(2**8 ):
if b not in bs:
bs.append(lowercase )
cs.append(2**8 + n )
n += 1
a =[chr(lowercase ) for n in cs]
return dict(zip(lowercase , lowercase ) )
def _A ( lowercase ):
"""simple docstring"""
a =set()
a =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a =char
return pairs
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ) -> List[Any]:
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding='''utf-8''' ) as vocab_handle:
a =json.load(__A )
a ={v: k for k, v in self.encoder.items()}
a =errors # how to handle errors in decoding
a =bytes_to_unicode()
a ={v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding='''utf-8''' ) as merges_handle:
a =merges_handle.read().split('''\n''' )[1:-1]
a =[tuple(merge.split() ) for merge in bpe_merges]
a =dict(zip(__A , range(len(__A ) ) ) )
a ={}
a =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
a =tuple(__A )
a =get_pairs(__A )
if not pairs:
return token
while True:
a =min(__A , key=lambda __A : self.bpe_ranks.get(__A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a =bigram
a =[]
a =0
while i < len(__A ):
try:
a =word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a =j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a =tuple(__A )
a =new_word
if len(__A ) == 1:
break
else:
a =get_pairs(__A )
a =''' '''.join(__A )
a =word
return word
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]:
a =[]
for token in re.findall(self.pat , __A ):
a =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(''' ''' ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[int]:
return self.decoder.get(__A )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Any:
a =''''''.join(__A )
a =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + '''\n''' )
a =0
with open(__A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
a =token_index
writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a =[self.cls_token_id]
a =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , __A , __A=False , **__A ) -> str:
a =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
a =''' ''' + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = PaddingStrategy.DO_NOT_PAD , __A = None , __A = None , ) -> dict:
a =super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
a ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
a =len(encoded_inputs['''global_attention_mask'''] ) != len(__A )
if needs_to_be_padded:
a =len(__A ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
a =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs | 81 |
"""simple docstring"""
lowerCamelCase_ : int = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Union[str, Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 81 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowercase (_A , _A ):
"""simple docstring"""
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tmp_path / 'cache'
_lowerCAmelCase : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase : Optional[int] = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A , keep_in_memory=_A ).read()
_check_sql_dataset(_A , _A )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowercase (_A , _A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Dict = tmp_path / 'cache'
_lowerCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowerCAmelCase : Tuple = features.copy() if features else default_expected_features
_lowerCAmelCase : Optional[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=_A , cache_dir=_A ).read()
_check_sql_dataset(_A , _A )
def lowercase (_A ):
"""simple docstring"""
    with contextlib.closing(sqlite3.connect(_A ) ) as con:
_lowerCAmelCase : Any = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = tmp_path / 'cache'
_lowerCAmelCase : Union[str, Any] = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : Any = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
_lowerCAmelCase : int = iter_sql_file(_A )
_lowerCAmelCase : List[str] = iter_sql_file(_A )
for rowa, rowa in zip(_A , _A ):
assert rowa == rowa
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tmp_path / 'cache'
_lowerCAmelCase : Any = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : int = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
_lowerCAmelCase : Any = iter_sql_file(_A )
_lowerCAmelCase : List[Any] = iter_sql_file(_A )
for rowa, rowa in zip(_A , _A ):
assert rowa == rowa
@require_sqlalchemy
def lowercase (_A , _A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = tmp_path / 'cache'
_lowerCAmelCase : Optional[int] = os.path.join(_A , 'tmp.sql' )
_lowerCAmelCase : List[Any] = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=_A ).read()
with pytest.raises(_A ):
SqlDatasetWriter(_A , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 25 |
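# Hedged usage sketch of the reader/writer exercised by the tests above; assumes a
# recent `datasets` with sqlalchemy installed. The table name and path are illustrative.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_sql("dataset", "sqlite:///tmp.sql")                      # backed by SqlDatasetWriter
round_trip = Dataset.from_sql("dataset", "sqlite:///tmp.sql")  # backed by SqlDatasetReader
assert round_trip.column_names == ds.column_names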
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 1 |
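# Quick illustration (with hypothetical token ids) of the two helpers above: RoBERTa
# wraps a single sequence as <s> A </s> and a pair as <s> A </s></s> B </s>, and the
# token_type_ids are all zeros in both cases.
bos, eos = 0, 2                      # assumed ids for <s> and </s>
a_ids, b_ids = [10, 11], [20]
single = [bos] + a_ids + [eos]
pair = single + [eos] + b_ids + [eos]
assert pair == [0, 10, 11, 2, 2, 20, 2]
assert [0] * len(pair) == [0, 0, 0, 0, 0, 0, 0]  # token_type_ids: all zeros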
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """simple docstring"""
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    """simple docstring"""
    return any(digit in '02468' for digit in str(n) )
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """simple docstring"""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num) )]
            if all(is_prime(i) for i in list_nums ):
                result.append(num)
    return result
def solution() -> int:
    """simple docstring"""
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 307 |
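# Worked example of the digit-rotation test used above (standalone sketch): 197 is a
# circular prime because all of its rotations -- 197, 971 and 719 -- are prime.
def rotations(n: int) -> list[int]:
    s = str(n)
    return [int(s[j:] + s[:j]) for j in range(len(s))]

assert rotations(197) == [197, 971, 719]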
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data) ):
        a = random.randint(0 , len(data) - 1 )
        b = random.randint(0 , len(data) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 307 | 1 |
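# Note: the snippet above performs len(data) random pair swaps, which shuffles but is
# not the textbook algorithm. Canonical Fisher-Yates walks from the last index down,
# swapping each element with a uniformly chosen earlier (or same) position; a minimal
# sketch for contrast:
import random

def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)          # uniform over positions 0..i
        data[i], data[j] = data[j], data[i]
    return data

assert sorted(fisher_yates([3, 1, 2])) == [1, 2, 3]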
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _lowerCamelCase ( nn.Module ):
def __init__(self ) -> Optional[int]:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4 )
UpperCamelCase = nn.BatchNormad(4 )
UpperCamelCase = nn.Linear(4 , 5 )
def snake_case_ (self , __a ) -> Any:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Dict:
UpperCamelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__a , model.state_dict() )
UpperCamelCase = os.path.join(__a , "index.json" )
self.assertTrue(os.path.isfile(__a ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase = os.path.join(__a , F"{key}.dat" )
self.assertTrue(os.path.isfile(__a ) )
# TODO: add tests on the fact weights are properly loaded
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCamelCase = torch.randn(2 , 3 , dtype=__a )
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = offload_weight(__a , "weight" , __a , {} )
UpperCamelCase = os.path.join(__a , "weight.dat" )
self.assertTrue(os.path.isfile(__a ) )
self.assertDictEqual(__a , {"weight": {"shape": [2, 3], "dtype": str(__a ).split("." )[1]}} )
UpperCamelCase = load_offloaded_weight(__a , index["weight"] )
self.assertTrue(torch.equal(__a , __a ) )
def snake_case_ (self ) -> int:
UpperCamelCase = ModelForTest()
UpperCamelCase = model.state_dict()
UpperCamelCase = {k: v for k, v in state_dict.items() if "linear2" not in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__a , __a )
UpperCamelCase = OffloadedWeightsLoader(state_dict=__a , save_folder=__a )
# Every key is there with the right value
self.assertEqual(sorted(__a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__a , weight_map[key] ) )
UpperCamelCase = {k: v for k, v in state_dict.items() if "weight" in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__a , __a )
UpperCamelCase = OffloadedWeightsLoader(state_dict=__a , save_folder=__a )
# Every key is there with the right value
self.assertEqual(sorted(__a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__a , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(__a , __a )
# Duplicates are removed
UpperCamelCase = OffloadedWeightsLoader(state_dict=__a , save_folder=__a )
# Every key is there with the right value
self.assertEqual(sorted(__a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(__a , weight_map[key] ) )
def snake_case_ (self ) -> str:
UpperCamelCase = {"a.1": 0, "a.10": 1, "a.2": 2}
UpperCamelCase = extract_submodules_state_dict(__a , ["a.1", "a.2"] )
self.assertDictEqual(__a , {"a.1": 0, "a.2": 2} )
UpperCamelCase = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
UpperCamelCase = extract_submodules_state_dict(__a , ["a.1", "a.2"] )
self.assertDictEqual(__a , {"a.1.a": 0, "a.2.a": 2} )
| 244 |
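# Hedged usage sketch of the helpers tested above (assumes `accelerate` and `torch`
# are installed and that `offload_state_dict` creates the target folder; the folder
# name is illustrative).
import torch
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"w": torch.randn(2, 3)}
offload_state_dict("offload_dir", state_dict)               # writes index.json + w.dat
loader = OffloadedWeightsLoader(save_folder="offload_dir")  # lazily maps keys to files
assert torch.allclose(loader["w"], state_dict["w"])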
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _lowerCamelCase ( _lowercase ):
def __init__(self , **__a ) -> Optional[int]:
super().__init__(**__a )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def snake_case_ (self , **__a ) -> List[Any]:
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCamelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCamelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCamelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCamelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCamelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCamelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCamelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCamelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> str:
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def snake_case_ (self , __a , __a=64 , __a = 0 , __a = 5_12 / 15_00 , __a = 32 , __a = 1 , ) -> List[str]:
UpperCamelCase = load_image(__a )
UpperCamelCase = self.image_processor.size["longest_edge"]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCamelCase = self.image_processor(images=__a , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase = self.get_inference_context()
with inference_context():
UpperCamelCase = self._ensure_tensor_on_device(__a , device=self.device )
UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCamelCase = image_embeddings
UpperCamelCase = grid_points.shape[1]
UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __a , __a ):
UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase = input_labels[:, i : i + points_per_batch]
UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> int:
UpperCamelCase = model_inputs.pop("input_boxes" )
UpperCamelCase = model_inputs.pop("is_last" )
UpperCamelCase = model_inputs.pop("original_sizes" ).tolist()
UpperCamelCase = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCamelCase = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase = model_outputs["pred_masks"]
UpperCamelCase = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCamelCase = model_outputs["iou_scores"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Optional[int]:
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
UpperCamelCase = torch.cat(__a )
UpperCamelCase = torch.cat(__a )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCamelCase = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCamelCase = {}
if output_rle_mask:
UpperCamelCase = rle_mask
if output_bboxes_mask:
UpperCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 244 | 1 |
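# Hedged usage sketch for the chunked pipeline above; the model id and image URL are
# illustrative, and a GPU is strongly recommended for reasonable speed.
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
)
print(len(outputs["masks"]))  # one binary mask per detected segment, plus "scores"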
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowercase_ (PretrainedConfig ):
    """simple docstring"""
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self ,vocab_size=1_0_0_0_0 ,decoder_layers=6 ,decoder_ffn_dim=2_0_4_8 ,decoder_attention_heads=4 ,decoder_layerdrop=0.0 ,use_cache=True ,activation_function="relu" ,d_model=2_5_6 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=2 ,scale_embedding=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,max_target_positions=1_0_2_4 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
| 104 |
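# Quick illustration of the attribute_map defined above (a sketch, assuming the class
# is registered in `transformers` as Speech2Text2Config): `hidden_size` and
# `num_attention_heads` are aliases resolved through the map.
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
assert config.hidden_size == 256         # alias for d_model
assert config.num_attention_heads == 4   # alias for decoder_attention_heads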
import re
def lowerCAmelCase__ ( dna ) -> str:
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )
    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 212 | 0 |
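# Standalone check of the complement rule used above: A<->T and C<->G.
table = str.maketrans("ATCG", "TAGC")
assert "ATCG".translate(table) == "TAGC"
assert "GGAT".translate(table) == "CCTA"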
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = []
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.events.append("""on_init_end""" )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
self.events.append("""on_train_begin""" )
def lowerCamelCase__ (self : str , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , **_UpperCAmelCase : int ) -> str:
"""simple docstring"""
self.events.append("""on_train_end""" )
def lowerCamelCase__ (self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , **_UpperCAmelCase : int ) -> int:
"""simple docstring"""
self.events.append("""on_epoch_begin""" )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
self.events.append("""on_epoch_end""" )
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
self.events.append("""on_step_begin""" )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
self.events.append("""on_step_end""" )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : str , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.events.append("""on_evaluate""" )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
self.events.append("""on_predict""" )
def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
self.events.append("""on_save""" )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , **_UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
self.events.append("""on_log""" )
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> int:
"""simple docstring"""
self.events.append("""on_prediction_step""" )
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : int ) -> int:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.output_dir )
def lowerCamelCase__ (self : int , _UpperCAmelCase : Any=0 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : int=None , _UpperCAmelCase : Union[str, Any]=False , **_UpperCAmelCase : int ) -> str:
"""simple docstring"""
lowercase__ = RegressionDataset(length=_UpperCAmelCase )
lowercase__ = RegressionDataset(length=_UpperCAmelCase )
lowercase__ = RegressionModelConfig(a=_UpperCAmelCase , b=_UpperCAmelCase )
lowercase__ = RegressionPreTrainedModel(_UpperCAmelCase )
lowercase__ = TrainingArguments(self.output_dir , disable_tqdm=_UpperCAmelCase , report_to=[] , **_UpperCAmelCase )
return Trainer(
_UpperCAmelCase , _UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , callbacks=_UpperCAmelCase , )
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Tuple:
"""simple docstring"""
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
# Order doesn't matter
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cb.__class__.__name__ )
lowercase__ = sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : cb.__name__ if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cb.__class__.__name__ )
for cba, cba in zip(_UpperCAmelCase , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(_UpperCAmelCase , cba.__class__ )
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(cba.__class__ , _UpperCAmelCase )
else:
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = ["""on_init_end""", """on_train_begin"""]
lowercase__ = 0
lowercase__ = len(trainer.get_eval_dataloader() )
lowercase__ = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(_UpperCAmelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowerCamelCase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.get_trainer()
lowercase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# Callbacks passed at init are added to the default callbacks
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
lowercase__ = self.get_trainer(disable_tqdm=_UpperCAmelCase )
lowercase__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
lowercase__ = self.get_trainer()
lowercase__ = trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(cb.__class__ , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
# We can also add, pop, or remove by instance
lowercase__ = self.get_trainer()
lowercase__ = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_UpperCAmelCase )
expected_callbacks.remove(_UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
lowercase__ = self.get_trainer()
lowercase__ = trainer.callback_handler.callbacks[0]
lowercase__ = trainer.pop_callback(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
trainer.add_callback(_UpperCAmelCase )
expected_callbacks.insert(0 , _UpperCAmelCase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _UpperCAmelCase )
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=_UpperCAmelCase )
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# Independent log/save/eval
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# A bit of everything
lowercase__ = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
lowercase__ = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_UpperCAmelCase , self.get_expected_events(_UpperCAmelCase ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
lowercase__ = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_UpperCAmelCase ) in warn_mock.call_args[0][0]
| 146 |
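# Minimal real-world counterpart to the event-recording callback tested above (a
# sketch; the printing behavior and class name are illustrative).
from transformers import TrainerCallback

class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# Passed via Trainer(..., callbacks=[LossLoggerCallback]) exactly like
# MyTestTrainerCallback is passed in the tests above.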
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 146 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A (stress :float , tangential_force :float , area :float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 |
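# Worked example of the relation used above (stress = tangential_force / area):
tangential_force, area = 25.0, 5.0     # N, m^2
assert tangential_force / area == 5.0  # shear stress in Pa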
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = 1
__A : Any = 3
__A : List[str] = (32, 32)
__A : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Any = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase__( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : int = self.dummy_cond_unet_upscale
__A : Union[str, Any] = DDPMScheduler()
__A : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
__A : int = self.dummy_vae
__A : int = self.dummy_text_encoder
__A : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : Any = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Dict = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : List[str] = '''A painting of a squirrel eating a burger'''
__A : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : List[str] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
__A : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : str = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=__lowerCamelCase , )[0]
__A : Tuple = image[0, -3:, -3:, -1]
__A : int = image_from_tuple[0, -3:, -3:, -1]
__A : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__A : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__A : Dict = self.dummy_cond_unet_upscale
__A : List[str] = DDPMScheduler()
__A : str = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[int] = self.dummy_vae
__A : Optional[Any] = self.dummy_text_encoder
__A : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__A : Any = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Any = '''A painting of a squirrel eating a burger'''
__A : Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
__A : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__A : Any = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__A : Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = self.dummy_cond_unet_upscale
__A : int = DDPMScheduler()
__A : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__A : Optional[Any] = self.dummy_vae
__A : List[str] = self.dummy_text_encoder
__A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__A : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A : int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__A : Union[str, Any] = unet.half()
__A : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
__A : Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
__A : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__A : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__A : Optional[Any] = torch.manual_seed(0 )
__A : Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type='''np''' , ).images
__A : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__A : str = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Union[str, Any] = '''a cat sitting on a park bench'''
__A : Union[str, Any] = torch.manual_seed(0 )
__A : Optional[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__A : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__A : Dict = '''a cat sitting on a park bench'''
__A : Any = torch.manual_seed(0 )
__A : Optional[int] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type='''np''' , )
__A : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCamelCase__( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__A : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__A : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
__A : Dict = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__A : Tuple = '''a cat sitting on a park bench'''
__A : Tuple = torch.manual_seed(0 )
__A : List[str] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type='''np''' , )
__A : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 179 | 0 |
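# Hedged usage sketch mirroring the slow tests above; the model id is the one the
# tests load, and the first run downloads several GB of weights.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
)
image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]  # 4x upscaled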
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
SCREAMING_SNAKE_CASE__ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
a__ : Optional[datasets.Features] = None
a__ : str = "utf-8"
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : bool = True # deprecated
a__ : Optional[int] = None # deprecated
a__ : int = 10 << 20 # 10MB
a__ : Optional[bool] = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
a__ : Optional[Any] = JsonConfig
def __A ( self : Optional[int] ) -> List[str]:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
__lowerCamelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__lowerCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__snake_case , (str, list, tuple) ):
__lowerCamelCase = data_files
if isinstance(__snake_case , __snake_case ):
__lowerCamelCase = [files]
__lowerCamelCase = [dl_manager.iter_files(__snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__lowerCamelCase = []
for split_name, files in data_files.items():
if isinstance(__snake_case , __snake_case ):
__lowerCamelCase = [files]
__lowerCamelCase = [dl_manager.iter_files(__snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=__snake_case , gen_kwargs={'''files''': files} ) )
return splits
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : pa.Table ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__lowerCamelCase = self.config.features.arrow_schema.field(__snake_case ).type
__lowerCamelCase = pa_table.append_column(__snake_case , pa.array([None] * len(__snake_case ) , type=__snake_case ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowerCamelCase = table_cast(__snake_case , self.config.features.arrow_schema )
return pa_table
def __A ( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]:
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowerCamelCase = json.load(__snake_case )
# We keep only the field we are interested in
__lowerCamelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__snake_case , (list, tuple) ):
__lowerCamelCase = set().union(*[row.keys() for row in dataset] )
__lowerCamelCase = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
else:
__lowerCamelCase = dataset
__lowerCamelCase = pa.Table.from_pydict(__snake_case )
yield file_idx, self._cast_table(__snake_case )
# If the file has one json object per line
else:
with open(__snake_case , '''rb''' ) as f:
__lowerCamelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__lowerCamelCase = max(self.config.chunksize // 32 , 16 << 10 )
__lowerCamelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
__lowerCamelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__lowerCamelCase = batch.decode(self.config.encoding , errors=__snake_case ).encode('''utf-8''' )
try:
while True:
try:
__lowerCamelCase = paj.read_json(
io.BytesIO(__snake_case ) , read_options=paj.ReadOptions(block_size=__snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__snake_case , pa.ArrowInvalid )
and "straddling" not in str(__snake_case )
or block_size > len(__snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(__snake_case )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__lowerCamelCase = json.load(__snake_case )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(__snake_case )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__snake_case , __snake_case ): # list is the only sequence type supported in JSON
try:
__lowerCamelCase = set().union(*[row.keys() for row in dataset] )
__lowerCamelCase = {col: [row.get(__snake_case ) for row in dataset] for col in keys}
__lowerCamelCase = pa.Table.from_pydict(__snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(__snake_case )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(__snake_case )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(__snake_case )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__snake_case )
batch_idx += 1
| 371 |
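# Hedged usage sketch of the builder above via the public API; file paths and the
# `field` value are illustrative.
from datasets import load_dataset

# newline-delimited JSON, read in `chunksize` blocks by the builder:
ds = load_dataset("json", data_files="data.jsonl", split="train")
# a single JSON object whose records live under one key (the `field` branch above):
ds2 = load_dataset("json", data_files="data.json", field="data", split="train")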
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Dict = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339 | 0 |
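# The module above defers its heavy imports until an attribute is first accessed. A
# generic standard-library sketch of the same lazy-import idea (names illustrative):
import importlib

class LazyModule:
    def __init__(self, name: str):
        self._name, self._module = name, None
    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)  # imported on first use
        return getattr(self._module, attr)

json_mod = LazyModule("json")
assert json_mod.loads("[1, 2]") == [1, 2]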
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(a) , "Tatoeba directory does not exist.")
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self) -> str:
_A : List[str] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__lowerCamelCase)
@slow
def _lowerCamelCase ( self) -> Tuple:
self.resolver.convert_models(["heb-eng"])
@slow
def _lowerCamelCase ( self) -> str:
_A , _A : Any = self.resolver.write_model_card("opus-mt-he-en" , dry_run=__lowerCamelCase)
assert mmeta["long_pair"] == "heb-eng"
| 11 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase( metaclass=__a ):
'''simple docstring'''
lowercase__ = ["note_seq"]
def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(self, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
@classmethod
def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ):
'''simple docstring'''
requires_backends(cls, ["""note_seq"""] )
| 64 | 0 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 365 |
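# Note on the implementation above: bead ("gravity") sort is defined only for
# non-negative integers. Each outer pass lets every surplus "bead" fall one rod, so
# at most len(sequence) passes are needed and this list version runs in O(n^2) time.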
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
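# Minimal usage sketch (illustrative, not part of the original file; assumes
# `transformers` is installed so PretrainedConfig above resolves):
if __name__ == "__main__":
    config = EfficientFormerConfig(image_size=224)
    assert config.model_type == "efficientformer"
    assert config.to_dict()["image_size"] == 224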
| 195 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
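# Illustrative sanity check (not part of the original module; relies on the
# fixed function above): the cosine schedule is non-decreasing and every beta
# stays below 1.0 thanks to the max_beta clamp.
_betas_sketch = betas_for_alpha_bar(10)
assert all(_betas_sketch[i] <= _betas_sketch[i + 1] for i in range(9))
assert float(_betas_sketch.max()) < 1.0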
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_UpperCamelCase : Dict = 2
@register_to_config
def __init__( self , a__ = 1000 , a__ = 0.0_0_0_8_5 , a__ = 0.0_1_2 , a__ = "linear" , a__ = None , a__ = "epsilon" , a__ = "linspace" , a__ = 0 , ):
if trained_betas is not None:
_lowerCAmelCase : str = torch.tensor(a__ , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase : Optional[Any] = torch.linspace(a__ , a__ , a__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase : int = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase : Optional[Any] = betas_for_alpha_bar(a__ )
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" )
_lowerCAmelCase : List[str] = 1.0 - self.betas
_lowerCAmelCase : Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a__ , a__ , a__ )
def __A ( self , a__ , a__=None ):
if schedule_timesteps is None:
_lowerCAmelCase : Dict = self.timesteps
_lowerCAmelCase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCAmelCase : List[str] = 1 if len(a__ ) > 1 else 0
else:
_lowerCAmelCase : Optional[int] = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
_lowerCAmelCase : Optional[int] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __A ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __A ( self , a__ , a__ , ):
_lowerCAmelCase : Union[str, Any] = self.index_for_timestep(a__ )
if self.state_in_first_order:
_lowerCAmelCase : Optional[int] = self.sigmas[step_index]
else:
_lowerCAmelCase : Optional[Any] = self.sigmas_interpol[step_index]
_lowerCAmelCase : List[str] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __A ( self , a__ , a__ = None , a__ = None , ):
_lowerCAmelCase : List[Any] = num_inference_steps
_lowerCAmelCase : Tuple = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCAmelCase : List[str] = np.linspace(0 , num_train_timesteps - 1 , a__ , dtype=a__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCAmelCase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : List[str] = (np.arange(0 , a__ ) * step_ratio).round()[::-1].copy().astype(a__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCAmelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase : str = (np.arange(a__ , 0 , -step_ratio )).round().copy().astype(a__ )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
_lowerCAmelCase : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCAmelCase : List[str] = torch.from_numpy(np.log(a__ ) ).to(a__ )
_lowerCAmelCase : List[str] = np.interp(a__ , np.arange(0 , len(a__ ) ) , a__ )
_lowerCAmelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowerCAmelCase : Any = torch.from_numpy(a__ ).to(device=a__ )
# interpolate sigmas
_lowerCAmelCase : List[str] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_lowerCAmelCase : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCAmelCase : Union[str, Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(a__ ).startswith("""mps""" ):
# mps does not support float64
_lowerCAmelCase : List[Any] = torch.from_numpy(a__ ).to(a__ , dtype=torch.floataa )
else:
_lowerCAmelCase : List[str] = torch.from_numpy(a__ ).to(a__ )
# interpolate timesteps
_lowerCAmelCase : List[Any] = self.sigma_to_t(a__ ).to(a__ , dtype=timesteps.dtype )
_lowerCAmelCase : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_lowerCAmelCase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_lowerCAmelCase : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCAmelCase : Tuple = defaultdict(a__ )
def __A ( self , a__ ):
# get log sigma
_lowerCAmelCase : str = sigma.log()
# get distribution
_lowerCAmelCase : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowerCAmelCase : Optional[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowerCAmelCase : List[str] = low_idx + 1
_lowerCAmelCase : str = self.log_sigmas[low_idx]
_lowerCAmelCase : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_lowerCAmelCase : List[Any] = (low - log_sigma) / (low - high)
_lowerCAmelCase : List[str] = w.clamp(0 , 1 )
# transform interpolation to time range
_lowerCAmelCase : Optional[Any] = (1 - w) * low_idx + w * high_idx
_lowerCAmelCase : Optional[int] = t.view(sigma.shape )
return t
@property
def __A ( self ):
return self.sample is None
def __A ( self , a__ , a__ , a__ , a__ = True , ):
_lowerCAmelCase : List[str] = self.index_for_timestep(a__ )
# advance index counter by 1
_lowerCAmelCase : str = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCAmelCase : Tuple = self.sigmas[step_index]
_lowerCAmelCase : Optional[int] = self.sigmas_interpol[step_index + 1]
_lowerCAmelCase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowerCAmelCase : int = self.sigmas[step_index - 1]
_lowerCAmelCase : Any = self.sigmas_interpol[step_index]
_lowerCAmelCase : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowerCAmelCase : int = 0
_lowerCAmelCase : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : int = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCAmelCase : Optional[Any] = sigma_interpol - sigma_hat
# store for 2nd order step
_lowerCAmelCase : Tuple = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowerCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowerCAmelCase : Union[str, Any] = sigma_next - sigma_hat
_lowerCAmelCase : int = self.sample
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a__ )
def __A ( self , a__ , a__ , a__ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowerCAmelCase : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a__ ):
# mps does not support float64
_lowerCAmelCase : str = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_lowerCAmelCase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_lowerCAmelCase : Any = self.timesteps.to(original_samples.device )
_lowerCAmelCase : int = timesteps.to(original_samples.device )
_lowerCAmelCase : str = [self.index_for_timestep(a__ , a__ ) for t in timesteps]
_lowerCAmelCase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCAmelCase : str = sigma.unsqueeze(-1 )
_lowerCAmelCase : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
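# Worked sketch (illustrative, not part of the original module): the
# interpolated sigmas built in set_timesteps are geometric means of
# neighbouring sigmas, i.e. a 0.5 lerp in log-space. For sigmas 4.0 and 1.0:
assert abs(math.exp(0.5 * (math.log(4.0) + math.log(1.0))) - 2.0) < 1e-12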
| 44 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10

def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
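    # Worked trace (illustrative, not in the original source): one digit is
    # stabilised per pass with RADIX = 10.
    #   [170, 45, 75, 90] -> ones     -> [170, 90, 45, 75]
    #                     -> tens     -> [45, 170, 75, 90]
    #                     -> hundreds -> [45, 75, 90, 170]
    assert radix_sort([170, 45, 75, 90]) == [45, 75, 90, 170]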
| 44 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    '''simple docstring'''

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        '''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
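# Minimal usage sketch (illustrative; the field names above are restored on
# the assumption that this mirrors `datasets.DownloadConfig`): copy() deep-
# copies mutable fields, so mutating the clone leaves the source intact.
if __name__ == "__main__":
    src = DownloadConfig(proxies={"https": "proxy:3128"})
    clone = src.copy()
    clone.proxies["https"] = "other:3128"
    assert src.proxies == {"https": "proxy:3128"}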
| 361 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
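# Worked check (illustrative, not from the original source): 220 and 284 are
# the smallest amicable pair, so each maps to the other under sum_of_divisors.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220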
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 217 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_beit"""] = ["""BeitFeatureExtractor"""]
    _import_structure["""image_processing_beit"""] = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
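# Self-contained sketch (a simplified assumption, not the real _LazyModule):
# the same deferred-import behaviour can be written with a PEP 562
# module-level __getattr__ that imports a submodule on first attribute access.
import importlib

_SKETCH_STRUCTURE = {"BeitConfig": "configuration_beit"}

def __getattr__(name):
    if name in _SKETCH_STRUCTURE:
        submodule = importlib.import_module("." + _SKETCH_STRUCTURE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(name)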
| 4 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=7, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=True, __magic_name__=99, __magic_name__=32, __magic_name__=2, __magic_name__=4, __magic_name__=37, __magic_name__="gelu", __magic_name__=0.1, __magic_name__=0.1, __magic_name__=512, __magic_name__=16, __magic_name__=2, __magic_name__=0.02, __magic_name__=3, __magic_name__=4, __magic_name__=None, ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : int = 13
UpperCamelCase__ : Optional[Any] = 7
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Dict = True
UpperCamelCase__ : str = True
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Tuple = 99
UpperCamelCase__ : Any = 384
UpperCamelCase__ : Tuple = 2
UpperCamelCase__ : int = 4
UpperCamelCase__ : Optional[int] = 37
UpperCamelCase__ : str = '''gelu'''
UpperCamelCase__ : Any = 0.1
UpperCamelCase__ : Dict = 0.1
UpperCamelCase__ : Tuple = 512
UpperCamelCase__ : int = 16
UpperCamelCase__ : int = 2
UpperCamelCase__ : Any = 0.02
UpperCamelCase__ : Optional[Any] = 3
UpperCamelCase__ : Any = 4
UpperCamelCase__ : Any = 128
UpperCamelCase__ : List[str] = 2
UpperCamelCase__ : List[str] = 9
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : List[Any] = None
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCamelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Dict = None
if self.use_token_type_ids:
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ : str = None
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : Dict = None
if self.use_labels:
UpperCamelCase__ : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size], self.num_choices )
UpperCamelCase__ : Any = ConvBertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=__magic_name__, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Tuple = TFConvBertModel(config=__magic_name__ )
UpperCamelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ : Any = [input_ids, input_mask]
UpperCamelCase__ : int = model(__magic_name__ )
UpperCamelCase__ : int = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> int:
"""simple docstring"""
UpperCamelCase__ : Any = TFConvBertForMaskedLM(config=__magic_name__ )
UpperCamelCase__ : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : str = self.num_labels
UpperCamelCase__ : Optional[int] = TFConvBertForSequenceClassification(config=__magic_name__ )
UpperCamelCase__ : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : int = self.num_choices
UpperCamelCase__ : str = TFConvBertForMultipleChoice(config=__magic_name__ )
UpperCamelCase__ : Optional[int] = tf.tile(tf.expand_dims(__magic_name__, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ : Any = tf.tile(tf.expand_dims(__magic_name__, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ : Tuple = tf.tile(tf.expand_dims(__magic_name__, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ : int = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase__ : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.num_labels
UpperCamelCase__ : Any = TFConvBertForTokenClassification(config=__magic_name__ )
UpperCamelCase__ : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = TFConvBertForQuestionAnswering(config=__magic_name__ )
UpperCamelCase__ : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a : int = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a : Optional[Any] = False
a : List[str] = False
a : List[Any] = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = TFConvBertModelTester(self )
UpperCamelCase__ : Optional[int] = ConfigTester(self, config_class=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = True
UpperCamelCase__ : Optional[int] = True
if hasattr(__magic_name__, '''use_cache''' ):
UpperCamelCase__ : Union[str, Any] = True
UpperCamelCase__ : Tuple = getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
UpperCamelCase__ : Any = getattr(self.model_tester, '''key_length''', __magic_name__ )
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = self._prepare_for_class(__magic_name__, __magic_name__ )
UpperCamelCase__ : Any = model_class(__magic_name__ )
UpperCamelCase__ : List[str] = len(model(__magic_name__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__, saved_model=__magic_name__ )
UpperCamelCase__ : List[Any] = os.path.join(__magic_name__, '''saved_model''', '''1''' )
UpperCamelCase__ : Dict = tf.keras.models.load_model(__magic_name__ )
UpperCamelCase__ : Optional[int] = model(__magic_name__ )
if self.is_encoder_decoder:
UpperCamelCase__ : Union[str, Any] = outputs['''encoder_hidden_states''']
UpperCamelCase__ : Any = outputs['''encoder_attentions''']
else:
UpperCamelCase__ : Optional[Any] = outputs['''hidden_states''']
UpperCamelCase__ : Dict = outputs['''attentions''']
self.assertEqual(len(__magic_name__ ), __magic_name__ )
UpperCamelCase__ : Optional[Any] = getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ), __magic_name__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ), [self.model_tester.seq_length, self.model_tester.hidden_size], )
self.assertEqual(len(__magic_name__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : str = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : str = True
UpperCamelCase__ : Union[str, Any] = getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length )
UpperCamelCase__ : str = getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length )
UpperCamelCase__ : List[str] = getattr(self.model_tester, '''key_length''', __magic_name__ )
UpperCamelCase__ : int = getattr(self.model_tester, '''key_length''', __magic_name__ )
def check_decoder_attentions_output(__magic_name__ ):
UpperCamelCase__ : List[str] = len(__magic_name__ )
self.assertEqual(out_len % 2, 0 )
UpperCamelCase__ : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(__magic_name__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )
def check_encoder_attentions_output(__magic_name__ ):
UpperCamelCase__ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__magic_name__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = True
UpperCamelCase__ : str = False
UpperCamelCase__ : Optional[Any] = model_class(__magic_name__ )
UpperCamelCase__ : Dict = model(self._prepare_for_class(__magic_name__, __magic_name__ ) )
UpperCamelCase__ : int = len(__magic_name__ )
self.assertEqual(config.output_hidden_states, __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
if self.is_encoder_decoder:
UpperCamelCase__ : str = model_class(__magic_name__ )
UpperCamelCase__ : List[str] = model(self._prepare_for_class(__magic_name__, __magic_name__ ) )
self.assertEqual(config.output_hidden_states, __magic_name__ )
check_decoder_attentions_output(__magic_name__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : Dict = model_class(__magic_name__ )
UpperCamelCase__ : Optional[Any] = model(self._prepare_for_class(__magic_name__, __magic_name__ ) )
self.assertEqual(config.output_hidden_states, __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
# Check attention is always last and order is fine
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : Dict = model_class(__magic_name__ )
UpperCamelCase__ : List[Any] = model(self._prepare_for_class(__magic_name__, __magic_name__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(__magic_name__ ) )
self.assertEqual(model.config.output_hidden_states, __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
UpperCamelCase__ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ : int = model(__magic_name__ )[0]
UpperCamelCase__ : Dict = [1, 6, 768]
self.assertEqual(output.shape, __magic_name__ )
UpperCamelCase__ : Union[str, Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3], __magic_name__, atol=1E-4 )
| 201 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Dict = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''deta'''
lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=900 , _UpperCAmelCase=2048 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=1024 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=True , _UpperCAmelCase=300 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.25 , **_UpperCAmelCase , ):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__A : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : int = backbone_config.pop('model_type')
__A : Tuple = CONFIG_MAPPING[backbone_model_type]
__A : Dict = config_class.from_dict(_UpperCAmelCase)
__A : Optional[Any] = backbone_config
__A : List[str] = num_queries
__A : List[Any] = max_position_embeddings
__A : List[str] = d_model
__A : Any = encoder_ffn_dim
__A : Optional[Any] = encoder_layers
__A : List[Any] = encoder_attention_heads
__A : Any = decoder_ffn_dim
__A : Optional[int] = decoder_layers
__A : Union[str, Any] = decoder_attention_heads
__A : Optional[int] = dropout
__A : Tuple = attention_dropout
__A : Optional[int] = activation_dropout
__A : Tuple = activation_function
__A : int = init_std
__A : Optional[int] = init_xavier_std
__A : Optional[int] = encoder_layerdrop
__A : List[str] = auxiliary_loss
__A : str = position_embedding_type
# deformable attributes
__A : str = num_feature_levels
__A : Union[str, Any] = encoder_n_points
__A : str = decoder_n_points
__A : List[str] = two_stage
__A : List[str] = two_stage_num_proposals
__A : Dict = with_box_refine
__A : int = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
__A : str = class_cost
__A : Dict = bbox_cost
__A : Any = giou_cost
# Loss coefficients
__A : str = mask_loss_coefficient
__A : List[Any] = dice_loss_coefficient
__A : List[Any] = bbox_loss_coefficient
__A : str = giou_loss_coefficient
__A : str = eos_coefficient
__A : List[str] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.d_model
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = copy.deepcopy(self.__dict__)
__A : Optional[int] = self.backbone_config.to_dict()
__A : Optional[Any] = self.__class__.model_type
        return output
| 368 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 0
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A : Optional[int] = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(__A.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A : Optional[Any] = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(__A , (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(__A.vocab_size , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        __A : Tuple = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(__A , RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        __A : Optional[Any] = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=__A)
        self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast))
        self.assertEqual(__A.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(tmp_dir , 'vocab.txt'))
            __A : Union[str, Any] = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='bert' , use_fast=False)
            self.assertIsInstance(__A , BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.json' , os.path.join(tmp_dir , 'vocab.json'))
            shutil.copy('./tests/fixtures/merges.txt' , os.path.join(tmp_dir , 'merges.txt'))
            __A : str = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='gpt2' , use_fast=False)
            self.assertIsInstance(__A , GPTaTokenizer)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(tmp_dir , 'vocab.txt'))
            __A : List[str] = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='bert')
            self.assertIsInstance(__A , BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('./tests/fixtures/vocab.json' , os.path.join(tmp_dir , 'vocab.json'))
            shutil.copy('./tests/fixtures/merges.txt' , os.path.join(tmp_dir , 'merges.txt'))
            __A : List[str] = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='gpt2')
            self.assertIsInstance(__A , GPTaTokenizerFast)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        with pytest.raises(ValueError):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            __A : List[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
            self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast))
            if isinstance(__A , BertTokenizer):
                self.assertEqual(__A.basic_tokenizer.do_lower_case , False)
            else:
                self.assertEqual(__A.do_lower_case , False)
            self.assertEqual(__A.model_max_length , 512)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__A : str = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = TOKENIZER_MAPPING.values()
__A : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCAmelCase) , _UpperCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCAmelCase)
__A : str = 'Hello, world. How are you?'
__A : List[str] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__A : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCAmelCase)
__A : List[Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 3_0000)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = get_tokenizer_config('bert-base-cased')
__A : Optional[int] = config.pop('_commit_hash' , _UpperCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__A : Dict = get_tokenizer_config(_UpperCAmelCase)
self.assertDictEqual(_UpperCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = get_tokenizer_config(_UpperCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
__A : Optional[Any] = CustomTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : int = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
# Can register in two steps
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[int] = BertTokenizerFast.from_pretrained(_UpperCAmelCase)
bert_tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = CustomTokenizerFast.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
__A : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase):
__A : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
__A : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = False
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# If remote code is not set, the default is to use local
__A : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Any = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision='aaaaaa')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__A : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
        self.assertEqual(counter.other_request_count , 0)
| 190 | 0 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(__a))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(__a))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
        self.assertFalse(is_safetensors_compatible(__a))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        self.assertTrue(is_safetensors_compatible(__a))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        self.assertFalse(is_safetensors_compatible(__a))
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(__a , variant=variant))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(__a , variant=variant))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
        variant = '''fp16'''
        self.assertTrue(is_safetensors_compatible(__a , variant=variant))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
        __b = '''fp16'''
        self.assertFalse(is_safetensors_compatible(__a , variant=__b))
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
        __b = '''fp16'''
        self.assertTrue(is_safetensors_compatible(__a , variant=__b))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
        __b = '''fp16'''
        self.assertTrue(is_safetensors_compatible(__a , variant=__b))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
        __b = '''fp16'''
        self.assertFalse(is_safetensors_compatible(__a , variant=__b))
| 49 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase : Tuple =False
class __a ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : int ):
'''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 189 | 0 |
"""simple docstring"""
def rank_of_matrix(matrix ) ->int:
    """simple docstring"""
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row] , matrix[i] = matrix[i] , matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # NOTE: decrementing a `for`-loop variable has no effect in Python;
            # the original algorithm intends to re-process the same row here.
            row -= 1
    return rank
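# Worked example (illustrative): in [[1.0, 2.0], [2.0, 4.0]] the second row is
# twice the first, so elimination zeroes it out and the rank drops to 1:
#
#     >>> rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])
#     1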
if __name__ == "__main__":
import doctest
doctest.testmod()
| 254 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str ) ->bool:
    """simple docstring"""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def luhn_validation(credit_card_number: str ) ->bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str ) ->bool:
    """simple docstring"""
    error_message = F"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(F"{error_message} it has nonnumerical characters." )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F"{error_message} of its length." )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F"{error_message} of its first two digits." )
        return False
    if not luhn_validation(credit_card_number ):
        print(F"{error_message} it fails the Luhn check." )
        return False
    print(F"{credit_card_number} is a valid credit card number." )
    return True
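# Worked Luhn example (illustrative): for "79927398713", doubling every second
# digit from the right and digit-summing any two-digit products yields a total
# of 70, and 70 % 10 == 0, so the number passes the check.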
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 254 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig( PretrainedConfig ):
    model_type = 'codegen'
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=5_0400 , n_positions=2048 , n_ctx=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig( OnnxConfigWithPast ):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
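    # Note (descriptive): each past_key_values entry above is a (key, value)
    # pair of zeros with shape (batch, num_heads, past_len, head_dim), and the
    # attention mask is widened by the same past length so the ONNX trace
    # exercises the cached-decoding path.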
@property
    def default_onnx_opset( self ):
        return 13
| 94 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_vision_model'''
def __init__( self , A=768 , A=12 , A=3 , A=16 , A=288 , A=1 , A=1e-05 , A=False , A=True , A=False , **A , ) -> Dict:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = patch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = stop_gradient
_SCREAMING_SNAKE_CASE = share_layernorm
_SCREAMING_SNAKE_CASE = remove_last_layer
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower_text_model'''
def __init__( self , A=5_0265 , A=768 , A=12 , A=12 , A=1 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=514 , A=1 , A=1e-05 , A=1 , A=0 , A=2 , A="absolute" , A=True , **A , ) -> Union[str, Any]:
super().__init__(**A )
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = eos_token_id
@classmethod
def snake_case_( cls , A , **A ) -> "PretrainedConfig":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cls.get_config_dict(A , **A )
if config_dict.get("""model_type""" ) == "bridgetower":
_SCREAMING_SNAKE_CASE = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = '''bridgetower'''
def __init__( self , A=True , A="gelu" , A=768 , A=1 , A=1e-05 , A=False , A="add" , A=12 , A=6 , A=False , A=False , A=None , A=None , **A , ) -> Tuple:
# TODO: remove this once the Hub files are updated.
_SCREAMING_SNAKE_CASE = kwargs.pop("""text_config_dict""" , A )
_SCREAMING_SNAKE_CASE = kwargs.pop("""vision_config_dict""" , A )
super().__init__(**A )
_SCREAMING_SNAKE_CASE = share_cross_modal_transformer_layers
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = initializer_factor
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = share_link_tower_layers
_SCREAMING_SNAKE_CASE = link_tower_type
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = tie_word_embeddings
_SCREAMING_SNAKE_CASE = init_layernorm_from_vision_encoder
if text_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_SCREAMING_SNAKE_CASE = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_SCREAMING_SNAKE_CASE = BridgeTowerTextConfig(**A )
_SCREAMING_SNAKE_CASE = BridgeTowerVisionConfig(**A )
@classmethod
def snake_case_( cls , A , A , **A ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 58 | 0 |
def min_path_sum(grid: list ) -> int:
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list , row_above: list ) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
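# Example (illustrative): the classic 3x3 grid below has minimal path sum 7
# along 1 -> 3 -> 1 -> 1 -> 1, moving only right or down:
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7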
if __name__ == "__main__":
import doctest
doctest.testmod()
| 177 |
def pancake_sort(arr ):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
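# Example (illustrative): each iteration flips the largest remaining element to
# the front, then flips the first `cur` elements so it settles at index cur - 1:
#
#     >>> pancake_sort([3, 1, 2])
#     [1, 2, 3]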
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 177 | 1 |
'''simple docstring'''
from random import randint, random
def construct_highway(number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ) -> list:
    '''simple docstring'''
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now , car_index ) -> int:
    '''simple docstring'''
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update(highway_now , probability , max_speed ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate(highway , number_of_update , probability , max_speed ) -> list:
    '''simple docstring'''
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
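# This is the Nagel-Schreckenberg cellular automaton: accelerate by one, brake
# to the free gap ahead, randomly slow down with `probability`, then advance.
# A tiny run (illustrative; output depends on the random seed):
#
#     highway = construct_highway(number_of_cells=21, frequency=3, initial_speed=1)
#     simulate(highway, number_of_update=2, probability=0.1, max_speed=5)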
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests( ModelTesterMixin , unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = '''hidden_states'''
    @property
    def dummy_input(self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self ):
        return (4, 8)
    @property
    def output_shape(self ):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        model , loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained(self ):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
        model = model.to(torch_device )
        if hasattr(model , 'set_default_attn_processor' ):
            model.set_default_attn_processor()
        seed_input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**seed_input )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests( unittest.TestCase ):
    def get_dummy_seed_input(self , batch_size=1 , embedding_dim=768 , num_embeddings=77 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior(self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
        model.to(torch_device )
        seed_input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**seed_input )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
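        # The expected slices fed in via @parameterized.expand are presumably
        # reference outputs recorded for these seeds; atol=1e-3 leaves headroom
        # for device-specific kernels.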
| 295 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like an ordinary word and keeps the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self ):
"""simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        """simple docstring"""
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ):
        """simple docstring"""
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ):
        """simple docstring"""
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
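# Minimal usage sketch (assumes a trained SentencePiece model on disk):
#
#     tokenizer = XLNetTokenizer("spiece.model")
#     ids = tokenizer("Hello world")["input_ids"]
#
# Note that XLNet appends <sep> and <cls> at the *end* of the sequence and pads
# on the left, which is why build_inputs_with_special_tokens returns
# token_ids_0 + sep + cls rather than the BERT-style cls + ... + sep.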
| 261 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare( self , dl_manager ):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"""src""": sources, """mt""": predictions, """ref""": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
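        # Note (descriptive): comet's predict() returns per-segment scores plus
        # a corpus-level system score; gpus=0 falls back to CPU when CUDA is
        # unavailable.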
| 261 | 1 |
"""simple docstring"""
import pytest
lowercase__ = """__dummy_dataset1__"""
lowercase__ = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ) -> str:
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'''{script_name}.py'''
    with open(script_path , "w" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
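# Usage sketch (illustrative; the real tests live elsewhere in the repo):
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         builder = datasets.load_dataset_builder(dataset_loading_script_dir)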
| 241 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance , reactance , impedance ) -> dict[str, float]:
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__UpperCamelCase , 2 ) - pow(__UpperCamelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__UpperCamelCase , 2 ) + pow(__UpperCamelCase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
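# Example (illustrative): a 3-4-5 impedance triangle,
#
#     >>> electrical_impedance(3, 4, 0)
#     {'impedance': 5.0}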
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "blip_text_model"
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=30524 , SCREAMING_SNAKE_CASE : Optional[Any]=768 , SCREAMING_SNAKE_CASE : Union[str, Any]=768 , SCREAMING_SNAKE_CASE : Tuple=3072 , SCREAMING_SNAKE_CASE : Any=768 , SCREAMING_SNAKE_CASE : Union[str, Any]=12 , SCREAMING_SNAKE_CASE : Dict=8 , SCREAMING_SNAKE_CASE : List[str]=512 , SCREAMING_SNAKE_CASE : Any="gelu" , SCREAMING_SNAKE_CASE : Dict=1e-12 , SCREAMING_SNAKE_CASE : Tuple=0.0 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : List[str]=30522 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : Union[str, Any]=102 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : List[str]=True , **SCREAMING_SNAKE_CASE : Tuple , ):
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , sep_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_A : Tuple = vocab_size
_A : Any = hidden_size
_A : Tuple = encoder_hidden_size
_A : List[str] = intermediate_size
_A : List[str] = projection_dim
_A : Any = hidden_dropout_prob
_A : Optional[int] = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : Optional[Any] = max_position_embeddings
_A : int = layer_norm_eps
_A : str = hidden_act
_A : List[Any] = initializer_range
_A : Tuple = attention_probs_dropout_prob
_A : str = is_decoder
_A : Optional[Any] = use_cache
@classmethod
def A ( cls : List[str] , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : Any):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE)
_A , _A : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
_A : Tuple = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
class BlipVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "blip_vision_model"
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : str=3072 , SCREAMING_SNAKE_CASE : Dict=512 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Union[str, Any]=12 , SCREAMING_SNAKE_CASE : List[str]=384 , SCREAMING_SNAKE_CASE : Optional[int]=16 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : Any=1e-5 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE : str=1e-10 , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE)
_A : str = hidden_size
_A : Tuple = intermediate_size
_A : str = projection_dim
_A : Optional[int] = num_hidden_layers
_A : Optional[int] = num_attention_heads
_A : Tuple = patch_size
_A : Optional[int] = image_size
_A : Any = initializer_range
_A : List[Any] = attention_dropout
_A : int = layer_norm_eps
_A : str = hidden_act
@classmethod
def A ( cls : List[str] , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE : List[str]):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE)
_A , _A : Tuple = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type') == "blip":
_A : List[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
class BlipConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "blip"
    is_composition = True
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[Any]=512 , SCREAMING_SNAKE_CASE : List[str]=2.6592 , SCREAMING_SNAKE_CASE : Optional[int]=256 , **SCREAMING_SNAKE_CASE : Any , ):
super().__init__(**SCREAMING_SNAKE_CASE)
if text_config is None:
_A : Optional[Any] = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
if vision_config is None:
_A : Dict = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
_A : Tuple = BlipTextConfig(**SCREAMING_SNAKE_CASE)
_A : Optional[Any] = BlipVisionConfig(**SCREAMING_SNAKE_CASE)
_A : Any = self.vision_config.hidden_size
_A : int = projection_dim
_A : Dict = logit_scale_init_value
_A : Any = 1.0
_A : List[str] = 0.02
_A : Tuple = image_text_hidden_size
@classmethod
def A ( cls : Dict , SCREAMING_SNAKE_CASE : BlipTextConfig , SCREAMING_SNAKE_CASE : BlipVisionConfig , **SCREAMING_SNAKE_CASE : List[str]):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE)
def A ( self : List[Any]):
_A : Tuple = copy.deepcopy(self.__dict__)
_A : str = self.text_config.to_dict()
_A : List[str] = self.vision_config.to_dict()
_A : List[Any] = self.__class__.model_type
return output
| 227 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mask2former'''] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
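# With the lazy module in place, importing this package is cheap: the heavy
# torch/vision submodules listed above are only loaded on first attribute
# access.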
| 227 | 1 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp , tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child] , positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start] , positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] , self.get_position(positions[start] ) )
            self.set_position(positions[start] , temp )
            self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
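# Example (illustrative): for a triangle graph with edges 0-1 (w=1), 1-2 (w=2)
# and 0-2 (w=3), starting from vertex 0, the returned minimum spanning tree
# edges are [(0, 1), (1, 2)] with total weight 3.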
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A_ = int(input("Enter number of edges: ").strip())
A_ = defaultdict(list)
for _ in range(edges_number):
A_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 139 |
'''simple docstring'''
def odd_even_transposition( arr ):
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i] , arr[i + 1] = arr[i + 1], arr[i]
    return arr
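# Odd-even transposition sort alternates compare-swap passes over even- and
# odd-indexed neighbor pairs; n passes suffice for n elements, making it a
# parallel-friendly variant of bubble sort.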
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 139 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 370 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def rename_key( key ):
    regex = R"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
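# Why the reshapes above: PyTorch stores Linear weights as (out, in) and Conv2d
# kernels as (out, in, kh, kw), while Flax expects (in, out) dense kernels and
# (kh, kw, in, out) convolution kernels.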
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict ) | 256 | 0 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        encoder_attn_name = 'SelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"""layers_{str(layer_index)}"""
        # Self-Attention
        tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
lowerCamelCase__ : Optional[Any] =tax_attention_key
lowerCamelCase__ : List[str] =tax_attention_out
lowerCamelCase__ : List[Any] =tax_attention_query
lowerCamelCase__ : int =tax_attention_value
lowerCamelCase__ : Optional[Any] =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__ : Tuple =tax_global_layer_norm
if split_mlp_wi:
            lowerCamelCase__ : Tuple =tax_mlp_wi_0
            lowerCamelCase__ : Optional[int] =tax_mlp_wi_1
else:
lowerCamelCase__ : Optional[Any] =tax_mlp_wi
lowerCamelCase__ : Optional[int] =tax_mlp_wo
lowerCamelCase__ : str =tax_mlp_layer_norm
lowerCamelCase__ : List[str] =flax_model_encoder_layer_block
# Only for layer 0:
    tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase__ : Optional[Any] =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
lowerCamelCase__ : List[str] =tax_encoder_global_rel_embedding
# Assigning
    tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
lowerCamelCase__ : Optional[Any] =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"""layers_{str(layer_index)}"""
# Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
lowerCamelCase__ : Tuple =tax_attention_key
lowerCamelCase__ : List[Any] =tax_attention_out
lowerCamelCase__ : List[Any] =tax_attention_query
lowerCamelCase__ : Dict =tax_attention_value
lowerCamelCase__ : Union[str, Any] =tax_pre_attention_layer_norm
lowerCamelCase__ : Union[str, Any] =tax_enc_dec_attention_key
lowerCamelCase__ : Tuple =tax_enc_dec_attention_out
lowerCamelCase__ : List[Any] =tax_enc_dec_attention_query
lowerCamelCase__ : Any =tax_enc_dec_attention_value
lowerCamelCase__ : Tuple =tax_cross_layer_norm
if split_mlp_wi:
            lowerCamelCase__ : Union[str, Any] =tax_mlp_wi_0
            lowerCamelCase__ : Tuple =tax_mlp_wi_1
else:
lowerCamelCase__ : str =tax_mlp_wi
lowerCamelCase__ : List[Any] =tax_mlp_wo
        lowerCamelCase__ : str =tax_mlp_layer_norm
lowerCamelCase__ : Optional[int] =flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    lowerCamelCase__ : Optional[int] =tax_decoder_norm
# Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
lowerCamelCase__ : str =tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    lowerCamelCase__ : Optional[int] =tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__ : str =tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 126 |
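# Example invocation sketch (script name and paths are illustrative only; the
# flags themselves are the ones registered with argparse above):
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/flax_dump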
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type') )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ' pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids | 126 | 1 |
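# Usage sketch for the add_prefix_space guard above (checkpoint name is
# illustrative): pretokenized input is only accepted when the tokenizer was
# built with add_prefix_space=True, e.g.
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)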
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ = """▁"""
snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '<unk>' )
        self.assertEqual(vocab_keys[1], '<s>' )
        self.assertEqual(vocab_keys[-1], '<pad>' )
        self.assertEqual(len(vocab_keys), 1002 )
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1000 )
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols) )
@slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        """simple docstring"""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
A_ : List[str] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A_, model_name='google/bert_for_seq_generation_L-24_bbc_encoder', revision='c817d1fd1be2ffa69431227a1fe320544943d4db', )
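# A note on the SentencePiece conventions exercised by the tests above: the
# "▁" piece (SPIECE_UNDERLINE) marks a word boundary, so "This is a test"
# tokenizes to ['▁This', '▁is', '▁a', '▁t', 'est'], and characters missing
# from the 1000-piece test vocab (such as 'é' or '9') round-trip through ids
# as '<unk>'.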
| 370 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    """simple docstring"""
    def test_text_streamer_matches_non_streaming(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
        model = AutoModelForCausalLM.from_pretrained('distilgpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt')
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) )
    def test_iterator_streamer_timeout(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.0_01)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
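    # Minimal usage sketch of TextIteratorStreamer outside a test (tokenizer,
    # model and input_ids assumed already built as above): generation runs on a
    # background thread while the caller consumes decoded chunks as they arrive.
    #   streamer = TextIteratorStreamer(tokenizer)
    #   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
    #   thread.start()
    #   for chunk in streamer:
    #       print(chunk, end="", flush=True)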
| 4 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
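# Note: the classic bitonic network assumes the input length is a power of two;
# e.g. bitonic_sort([12, 42, -21, 17], 0, 4, 1) rearranges the list in place to
# [-21, 12, 17, 42] (direction 1 = ascending, 0 = descending).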
| 300 |
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
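# For example, heaps([1, 2, 3]) generates all 3! = 6 permutations via Heap's algorithm:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]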
| 300 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels = None, timestep_cond = None, attention_mask = None, cross_attention_kwargs = None, guess_mode = False, return_dict = True, ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets) ):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(self, save_directory, is_main_process = True, save_function = None, safe_serialization = False, variant = None, ) -> None:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs) -> "MultiControlNetModel":
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets)} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.""" )
        return cls(controlnets)
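    # Directory layout sketch (names illustrative): for two controlnets,
    # save_pretrained("mydir") writes the first model to mydir/ and the second
    # to mydir_1/; from_pretrained("mydir") then walks mydir, mydir_1, ... and
    # rebuilds the MultiControlNetModel in the same order.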
| 1 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
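    # e.g. knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13
    #      (take the items of weight 1 and 4 for a total value of 5 + 8)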
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None
    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
@property
    def has_loop(self) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
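
# The visited-list check in __iter__ above costs O(n) extra space; Floyd's
# classic two-pointer cycle detection gives the same answer in O(1) space.
# A minimal sketch reusing the Node class above:
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False
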
if __name__ == "__main__":
__UpperCAmelCase = Node(1)
__UpperCAmelCase = Node(2)
__UpperCAmelCase = Node(3)
__UpperCAmelCase = Node(4)
print(root_node.has_loop) # False
__UpperCAmelCase = root_node.next_node
print(root_node.has_loop) # True
__UpperCAmelCase = Node(5)
__UpperCAmelCase = Node(6)
__UpperCAmelCase = Node(5)
__UpperCAmelCase = Node(6)
print(root_node.has_loop) # False
__UpperCAmelCase = Node(1)
print(root_node.has_loop) # False
| 84 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
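# Note on the pattern above: _LazyModule defers the heavy torch imports until
# an attribute is first accessed, so e.g. `from transformers import AltCLIPModel`
# only triggers the real import of modeling_altclip at that point.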
| 306 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """simple docstring"""
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics, os.path.join(output_dir, f"""{split}_results.json""" ) )


def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf='.ckpt' in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path='train', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path='val', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path='test', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train', metrics, training_args.output_dir )
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics['val_loss'], 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val', metrics, training_args.output_dir )
            all_metrics.update(metrics)
if training_args.do_predict:
logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics['test_loss'], 4 )
            handle_metrics('test', metrics, training_args.output_dir )
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, 'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, 'all_results.json' ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
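# Example invocation sketch (script name, paths and dataset layout are
# illustrative only; the flags come from the dataclasses above):
#   python finetune_trainer.py \
#       --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#       --do_train --do_eval --task translation \
#       --src_lang en_XX --tgt_lang ro_RO --predict_with_generate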
| 351 | '''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
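    # e.g. find_max([1, 3, 5, 9, 2, 0], 0, 5) == 9; indices outside
    # [-len(nums), len(nums)) raise IndexError.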
| 345 | 0 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """simple docstring"""
    base_repr = str(n)
    return len(base_repr) == 9 and set(base_repr) == set('123456789')


def solution() -> int | None:
    """simple docstring"""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }') | 320 |
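# For reference: 9327 * 100002 == 932718654, i.e. the concatenation of 9327*1
# and 9327*2, which uses each digit 1-9 exactly once; this is the value
# solution() returns.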
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1) )
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """simple docstring"""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1) )
        self.b = nn.Parameter(torch.randn(1) )

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
"""simple docstring"""
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ), 1 )
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0' ) )
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1' ) )
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3] )
        t1 = torch.tensor([2, 3, 4] )
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters() )
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0' ) )
            self.assertEqual(scheduler_state, scheduler.state_dict() )
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0' ) ) )
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9' ) ) )
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10' ) ) )
@require_cuda
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
__snake_case = """/tmp/accelerate/state_checkpointing"""
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__snake_case , __snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
__snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
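# A minimal sketch of the save/load round trip the tests above exercise,
# assuming the public `accelerate` API (Accelerator.save_state / load_state
# with ProjectConfiguration's automatic checkpoint naming); the model and
# directory below are illustrative placeholders.
import os
import tempfile

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration


def checkpoint_roundtrip() -> None:
    with tempfile.TemporaryDirectory() as savedir:
        accelerator = Accelerator(
            project_dir=savedir,
            project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
        )
        model = torch.nn.Linear(1, 1)
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        model, optimizer = accelerator.prepare(model, optimizer)
        accelerator.save_state()  # written to {savedir}/checkpoints/checkpoint_0
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"))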
| 259 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[str] = ['input_features', 'is_longer']
def __init__( self ,__UpperCAmelCase=64 ,__UpperCAmelCase=4_80_00 ,__UpperCAmelCase=4_80 ,__UpperCAmelCase=10 ,__UpperCAmelCase=10_24 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=False ,__UpperCAmelCase = 0 ,__UpperCAmelCase = 1_40_00 ,__UpperCAmelCase = None ,__UpperCAmelCase = "fusion" ,__UpperCAmelCase = "repeatpad" ,**__UpperCAmelCase ,) -> Dict:
super().__init__(
feature_size=__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,padding_value=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,**__UpperCAmelCase ,)
A__ = top_db
A__ = truncation
A__ = padding
A__ = fft_window_size
A__ = (fft_window_size >> 1) + 1
A__ = hop_length
A__ = max_length_s
A__ = max_length_s * sampling_rate
A__ = sampling_rate
A__ = frequency_min
A__ = frequency_max
A__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCAmelCase ,min_frequency=__UpperCAmelCase ,max_frequency=__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,norm=__UpperCAmelCase ,mel_scale='htk' ,)
A__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCAmelCase ,min_frequency=__UpperCAmelCase ,max_frequency=__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,norm='slaney' ,mel_scale='slaney' ,)
def snake_case__ ( self ) -> Dict[str, Any]:
A__ = copy.deepcopy(self.__dict__ )
A__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> np.ndarray:
A__ = spectrogram(
__UpperCAmelCase ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__UpperCAmelCase ,log_mel='dB' ,)
return log_mel_spectrogram.T
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
A__ = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A__ = [0]
# randomly choose index for each part
A__ = np.random.choice(ranges[0] )
A__ = np.random.choice(ranges[1] )
A__ = np.random.choice(ranges[2] )
A__ = mel[idx_front : idx_front + chunk_frames, :]
A__ = mel[idx_middle : idx_middle + chunk_frames, :]
A__ = mel[idx_back : idx_back + chunk_frames, :]
A__ = torch.tensor(mel[None, None, :] )
A__ = torch.nn.functional.interpolate(
__UpperCAmelCase ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=__UpperCAmelCase )
A__ = mel_shrink[0][0].numpy()
A__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A__ = len(__UpperCAmelCase ) - max_length
A__ = np.random.randint(0 ,overflow + 1 )
A__ = waveform[idx : idx + max_length]
A__ = self._np_extract_fbank_features(__UpperCAmelCase ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A__ = self._np_extract_fbank_features(__UpperCAmelCase ,self.mel_filters )
            A__ = max_length // self.hop_length + 1 # the +1 comes from how the spectrogram is computed
A__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A__ = np.stack([mel, mel, mel, mel] ,axis=0 )
A__ = False
else:
A__ = self._random_mel_fusion(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
A__ = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
A__ = False
        # only use "repeat" as a new possible value for padding; the audio is repeated before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A__ = int(max_length / len(__UpperCAmelCase ) )
A__ = np.stack(np.tile(__UpperCAmelCase ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A__ = int(max_length / len(__UpperCAmelCase ) )
A__ = np.stack(np.tile(__UpperCAmelCase ,__UpperCAmelCase ) )
A__ = np.pad(__UpperCAmelCase ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 )
if truncation == "fusion":
A__ = self._np_extract_fbank_features(__UpperCAmelCase ,self.mel_filters )
A__ = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A__ = self._np_extract_fbank_features(__UpperCAmelCase ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> BatchFeature:
A__ = truncation if truncation is not None else self.truncation
A__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
A__ = isinstance(__UpperCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
A__ = is_batched_numpy or (
isinstance(__UpperCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A__ = [np.asarray(__UpperCAmelCase ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCAmelCase ,np.ndarray ):
A__ = np.asarray(__UpperCAmelCase ,dtype=np.floataa )
elif isinstance(__UpperCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [np.asarray(__UpperCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
A__ = [
self._get_input_mel(__UpperCAmelCase ,max_length if max_length else self.nb_max_samples ,__UpperCAmelCase ,__UpperCAmelCase )
for waveform in raw_speech
]
A__ = []
A__ = []
for mel, longer in padded_inputs:
input_mel.append(__UpperCAmelCase )
is_longer.append(__UpperCAmelCase )
if truncation == "fusion" and sum(__UpperCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A__ = np.random.randint(0 ,len(__UpperCAmelCase ) )
A__ = True
if isinstance(input_mel[0] ,__UpperCAmelCase ):
A__ = [np.asarray(__UpperCAmelCase ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A__ = [[longer] for longer in is_longer]
A__ = {'input_features': input_mel, 'is_longer': is_longer}
A__ = BatchFeature(__UpperCAmelCase )
if return_tensors is not None:
A__ = input_features.convert_to_tensors(__UpperCAmelCase )
return input_features
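# A standalone sketch of the "repeatpad" branch implemented above: tile the
# waveform until it covers max_length, then zero-pad to the exact length.
# Pure numpy; the helper name is illustrative and assumes the waveform is
# shorter than max_length, as in the branch above.
import numpy as np


def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    # pad the remainder with zeros so the output has exactly max_length samples
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)


assert repeatpad(np.ones(3), 8).shape == (8,)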
| 154 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
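# A minimal sketch of the lazy-import idea this init file relies on: attribute
# access resolves the submodule on first use, so heavy imports are deferred.
# transformers' `_LazyModule` implements a richer version of the same idea;
# the class below is illustrative only.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # import the owning submodule only when one of its symbols is requested
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")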
| 154 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> str:
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 20}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
def __A ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase ( _a , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = MobileNetVaImageProcessingTester(self )
@property
def __A ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'crop_size' ) )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __A ( self ) -> List[Any]:
pass
def __A ( self ) -> List[str]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 113 | """simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a)
class UpperCAmelCase_ ( _a):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase__ : str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True})
lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")})
lowerCamelCase__ : ClassVar[Features] = Features({"labels": ClassLabel})
lowerCamelCase__ : str = "text"
lowerCamelCase__ : str = "labels"
def _UpperCAmelCase ( self , a ) -> Tuple:
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , a ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowercase__ : Optional[Any] = copy.deepcopy(self )
lowercase__ : Optional[Any] = self.label_schema.copy()
lowercase__ : Any = features[self.label_column]
lowercase__ : Optional[Any] = label_schema
return task_template
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
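# A sketch of the alignment performed above, assuming the public `datasets`
# feature types: the template's generic ClassLabel is replaced by the
# dataset's own, so downstream code sees the concrete label names. The
# column names mirror the template defaults; the feature values are
# illustrative.
from datasets import ClassLabel, Features, Value

dataset_features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
aligned_label_schema = Features({"labels": dataset_features["labels"]})
assert aligned_label_schema["labels"].names == ["neg", "pos"]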
| 77 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =IFImgaImgSuperResolutionPipeline
lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
lowercase : Tuple =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
lowercase : List[Any] =PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
def lowercase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
| 6 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
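# A minimal sketch of the decorator pattern used above, assuming accelerate's
# `find_executable_batch_size`: on a CUDA out-of-memory error the wrapped
# function is retried with the batch size halved. The training body here is a
# stub standing in for dataloader construction and the train loop.
from accelerate.utils import find_executable_batch_size


def run_training(starting_batch_size: int = 128) -> int:
    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner(batch_size: int) -> int:
        # build dataloaders with `batch_size` and train here; raising an OOM
        # error makes the decorator retry the whole function with batch_size // 2
        return batch_size

    return inner()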
| 6 | 1 |
class A_ :
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : Union[str, Any] = name
__lowerCamelCase : Optional[int] = val
def __str__( self : str):
return F"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self : str ,SCREAMING_SNAKE_CASE__ : List[str]):
return self.val < other.val
class A_ :
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : Union[str, Any] = {}
__lowerCamelCase : Any = self.build_heap(SCREAMING_SNAKE_CASE__)
def __getitem__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str):
return self.get_value(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Dict):
return (idx - 1) // 2
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
return idx * 2 + 1
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
return idx * 2 + 2
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple):
return self.heap_dict[key]
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple):
__lowerCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE__) - 1
__lowerCamelCase : Any = self.get_parent_idx(SCREAMING_SNAKE_CASE__)
for idx, i in enumerate(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Tuple = idx
__lowerCamelCase : Optional[Any] = i.val
for i in range(SCREAMING_SNAKE_CASE__ ,-1 ,-1):
self.sift_down(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
return array
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any):
while True:
__lowerCamelCase : str = self.get_left_child_idx(SCREAMING_SNAKE_CASE__) # noqa: E741
__lowerCamelCase : List[str] = self.get_right_child_idx(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = idx
if l < len(SCREAMING_SNAKE_CASE__) and array[l] < array[idx]:
__lowerCamelCase : Optional[Any] = l
if r < len(SCREAMING_SNAKE_CASE__) and array[r] < array[smallest]:
__lowerCamelCase : List[str] = r
if smallest != idx:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = array[smallest], array[idx]
                __lowerCamelCase, __lowerCamelCase = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
__lowerCamelCase : Optional[int] = smallest
else:
break
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Optional[Any] = self.get_parent_idx(SCREAMING_SNAKE_CASE__)
while p >= 0 and self.heap[p] > self.heap[idx]:
__lowerCamelCase , __lowerCamelCase : int = self.heap[idx], self.heap[p]
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
__lowerCamelCase : str = p
__lowerCamelCase : int = self.get_parent_idx(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str]):
return self.heap[0]
def lowerCAmelCase ( self : int):
__lowerCamelCase , __lowerCamelCase : Tuple = self.heap[-1], self.heap[0]
__lowerCamelCase , __lowerCamelCase : Tuple = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
__lowerCamelCase : Optional[Any] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap)
return x
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
self.heap.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = len(self.heap) - 1
__lowerCamelCase : str = node.val
self.sift_up(len(self.heap) - 1)
def lowerCAmelCase ( self : Optional[int]):
return len(self.heap) == 0
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[Any]):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than the current value"
__lowerCamelCase : List[str] = new_value
__lowerCamelCase : Dict = new_value
self.sift_up(self.idx_of_element[node])
a =Node("""R""", -1)
a =Node("""B""", 6)
a =Node("""A""", 3)
a =Node("""X""", 1)
a =Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
a =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
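# A small invariant check for the heap above, assuming the array layout used
# here (children of index i live at 2*i + 1 and 2*i + 2). It relies only on
# `__lt__`, so it also works on the Node objects defined in this file.
def is_min_heap(heap: list) -> bool:
    return all(
        not heap[child] < heap[i]
        for i in range(len(heap))
        for child in (2 * i + 1, 2 * i + 2)
        if child < len(heap)
    )


assert is_min_heap([1, 3, 2, 7, 4])
assert not is_min_heap([3, 1, 2])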
| 73 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """
    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
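# Quick sanity check for the helper above: the string it builds should match
# Python's built-in bitwise OR on the same operands.
assert binary_or(25, 32) == bin(25 | 32)  # both give "0b111001"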
| 102 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
SCREAMING_SNAKE_CASE = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
SCREAMING_SNAKE_CASE = s_dict.pop(UpperCamelCase__ )
elif "subsample" in key:
SCREAMING_SNAKE_CASE = s_dict.pop(UpperCamelCase__ )
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict ) -> str:
SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase__ , map_location='cpu' )
SCREAMING_SNAKE_CASE = mam_aaa['''args''']
SCREAMING_SNAKE_CASE = mam_aaa['''model''']
SCREAMING_SNAKE_CASE = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(UpperCamelCase__ )
rename_keys(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = state_dict['''decoder.embed_tokens.weight'''].shape[0]
SCREAMING_SNAKE_CASE = args.share_decoder_input_output_embed
SCREAMING_SNAKE_CASE = [int(UpperCamelCase__ ) for i in args.conv_kernel_sizes.split(',' )]
SCREAMING_SNAKE_CASE = SpeechaTextConfig(
vocab_size=UpperCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(UpperCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase__ , num_beams=5 , max_length=2_00 , use_cache=UpperCamelCase__ , decoder_start_token_id=2 , early_stopping=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE = SpeechaTextForConditionalGeneration(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F' but all the following weights are missing {missing}' )
if tie_embeds:
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE = lm_head_weights
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCamelCase = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
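# Example invocation for the script above (both paths are placeholders, and
# the script filename depends on where this module is saved):
#   python convert_s2t_checkpoint.py \
#       --fairseq_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir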
| 351 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    # sum of all proper divisors of n (divisor pairs found up to sqrt(n))
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 1_00_00) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
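# Worked example for the helpers above: 220 and 284 form the classic amicable
# pair, since each number's proper divisors sum to the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220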
| 38 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__UpperCAmelCase =False
try:
__UpperCAmelCase =_is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class a__ :
def __init__( self : Dict , a : str = None , a : list = [] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = choices
__lowerCamelCase = prompt
if sys.platform == "win32":
__lowerCamelCase = '''*'''
else:
__lowerCamelCase = '''➔ '''
def SCREAMING_SNAKE_CASE__ ( self : str , a : int , a : str = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , a )
else:
forceWrite(self.choices[index] , a )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : int ):
"""simple docstring"""
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Direction , a : int = 1 ):
"""simple docstring"""
__lowerCamelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(a )
move_cursor(a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10 )] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase = int(chr(self.current_selection ) )
__lowerCamelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , a )
else:
return
else:
return
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
__lowerCamelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(a )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__lowerCamelCase = int(builtins.input() )
except ValueError:
__lowerCamelCase = default_choice
else:
__lowerCamelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(a , '''\n''' )
return choice
| 67 |
import math
import random
def sigmoid_function(value, deriv=False):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    # random starting weight: an odd integer in 1..199
    weight = float(2 * (random.randint(1, 1_00)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 1_00


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
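# Deterministic usage sketch: seeding `random` pins the initial weight so runs
# are reproducible; with enough propagations the output should drift toward
# the expected value (here 50, i.e. a sigmoid activation of 0.5).
def _demo() -> float:
    random.seed(0)
    return forward_propagation(50, 10_00_00)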
| 299 | 0 |
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    a = 3
    result = 0
    while a < n:
        # multiples of 15 are multiples of both 3 and 5, but the `or` below
        # already counts each qualifying number exactly once, so no
        # correction term is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
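# The same sum in closed form via inclusion-exclusion: add the arithmetic
# series of the multiples of 3 and of 5, then subtract the doubly counted
# multiples of 15. This matches the loop above for any n.
def solution_closed_form(n: int = 1_000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of multiples of k strictly below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form(1_000) == 233_168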
| 233 |
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort the list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
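# Quick sanity checks for the sort above; exchange sort is O(n^2) but simple,
# so it is perfectly adequate for small inputs like these.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-2, 0, -1]) == [-2, -1, 0]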
| 233 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
snake_case : Union[str, Any] = get_logger(__name__)
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Any , _snake_case : int , _snake_case : Optional[int]=0 ) -> int:
'''simple docstring'''
os.makedirs(_snake_case , exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__magic_name__ : Optional[Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ : Optional[Any] = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__magic_name__ : List[Any] = os.path.join(_snake_case , _snake_case )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ : Optional[int] = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__magic_name__ : Tuple = os.path.join(_snake_case , _snake_case )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ : Any = os.path.join(_snake_case , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
logger.info(F'''Saving model to {ckpt_dir}''' )
__magic_name__ : Union[str, Any] = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_snake_case , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : int , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]=0 ) -> int:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__magic_name__ : int = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__magic_name__ : List[Any] = os.path.join(_snake_case , _snake_case )
logger.info(F'''Loading model from {input_model_file}''' )
__magic_name__ : Any = torch.load(_snake_case )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__magic_name__ : str = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__magic_name__ : Optional[int] = os.path.join(_snake_case , _snake_case )
logger.info(F'''Loading model from {input_model_file}''' )
__magic_name__ : int = torch.load(_snake_case )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__magic_name__ : Any = (
os.path.join(_snake_case , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__magic_name__ : int = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_snake_case , storage_reader=dist_cp.FileSystemReader(_snake_case ) , planner=DefaultLoadPlanner() , )
__magic_name__ : str = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_snake_case )
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Tuple=0 ) -> int:
'''simple docstring'''
os.makedirs(_snake_case , exist_ok=_snake_case )
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__magic_name__ : Optional[int] = FSDP.optim_state_dict(_snake_case , _snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__magic_name__ : List[Any] = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__magic_name__ : Any = os.path.join(_snake_case , _snake_case )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_snake_case , _snake_case )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__magic_name__ : Optional[int] = os.path.join(_snake_case , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_snake_case , exist_ok=_snake_case )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_snake_case ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any]=0 ) -> int:
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__magic_name__ : Optional[Any] = None
            # the check below should work, but it currently does not (mostly a PyTorch
            # issue); in the meantime it is disabled at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__magic_name__ : Optional[int] = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__magic_name__ : str = os.path.join(_snake_case , _snake_case )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__magic_name__ : Optional[int] = torch.load(_snake_case )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__magic_name__ : Dict = (
os.path.join(_snake_case , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__magic_name__ : List[str] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_snake_case ) , )
__magic_name__ : Union[str, Any] = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__magic_name__ : Dict = FSDP.optim_state_dict_to_load(_snake_case , _snake_case , _snake_case )
optimizer.load_state_dict(_snake_case )
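# A minimal sketch of the context these helpers build on, assuming the
# torch.distributed.fsdp API already imported above: `state_dict_type`
# controls whether state_dict() returns a full, local, or sharded view.
# This only runs inside an initialized process group with an FSDP-wrapped
# module; `model` is a placeholder.
def gather_full_state_dict(model: "FSDP") -> dict:
    # FULL_STATE_DICT gathers the parameters across ranks into one plain dict
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT):
        return model.state_dict()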
| 281 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np

# matplotlib is needed because `plot_pr_curve` below draws with pyplot; the
# Agg backend keeps the script usable without a display
import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
snake_case : Dict = re.compile(R"\b(a|an|the)\b", re.UNICODE)
snake_case : Optional[int] = None
def lowerCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Any = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=_snake_case , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=_snake_case , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowerCAmelCase_ ( _snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__magic_name__ : str = bool(qa["answers"]["text"] )
return qid_to_has_ans
def lowerCAmelCase_ ( _snake_case : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
def remove_articles(_snake_case : List[str] ):
return ARTICLES_REGEX.sub(" " , _snake_case )
def white_space_fix(_snake_case : Optional[int] ):
return " ".join(text.split() )
def remove_punc(_snake_case : Optional[int] ):
__magic_name__ : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_snake_case : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_snake_case ) ) ) )
def lowerCAmelCase_ ( _snake_case : Any ) -> Optional[Any]:
'''simple docstring'''
if not s:
return []
return normalize_answer(_snake_case ).split()
def lowerCAmelCase_ ( _snake_case : str , _snake_case : Dict ) -> Tuple:
'''simple docstring'''
return int(normalize_answer(_snake_case ) == normalize_answer(_snake_case ) )
def lowerCAmelCase_ ( _snake_case : List[str] , _snake_case : int ) -> str:
'''simple docstring'''
__magic_name__ : Any = get_tokens(_snake_case )
__magic_name__ : Optional[int] = get_tokens(_snake_case )
__magic_name__ : Tuple = collections.Counter(_snake_case ) & collections.Counter(_snake_case )
__magic_name__ : Tuple = sum(common.values() )
if len(_snake_case ) == 0 or len(_snake_case ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__magic_name__ : Dict = 1.0 * num_same / len(_snake_case )
__magic_name__ : Optional[Any] = 1.0 * num_same / len(_snake_case )
__magic_name__ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
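# Worked example of the token-level F1 above: for gold "a cat sat" versus
# prediction "the cat sat down", normalization strips the articles "a"/"the",
# leaving gold tokens [cat, sat] and predicted tokens [cat, sat, down]. The
# overlap is 2 tokens, so precision = 2/3, recall = 2/2 = 1, and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.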
def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : List[Any] ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = {}
__magic_name__ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__magic_name__ : Union[str, Any] = qa["id"]
__magic_name__ : Any = [t for t in qa["answers"]["text"] if normalize_answer(_snake_case )]
if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
__magic_name__ : Tuple = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
__magic_name__ : Any = preds[qid]
# Take max over all gold answers
__magic_name__ : List[Any] = max(compute_exact(_snake_case , _snake_case ) for a in gold_answers )
__magic_name__ : int = max(compute_fa(_snake_case , _snake_case ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCAmelCase_ ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : str = {}
for qid, s in scores.items():
__magic_name__ : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
__magic_name__ : str = float(not qid_to_has_ans[qid] )
else:
__magic_name__ : Optional[int] = s
return new_scores
def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple=None ) -> Tuple:
'''simple docstring'''
if not qid_list:
__magic_name__ : Any = len(_snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
__magic_name__ : Tuple = len(_snake_case )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def lowerCAmelCase_ ( _snake_case : Optional[int] , _snake_case : str , _snake_case : str ) -> Dict:
'''simple docstring'''
for k in new_eval:
__magic_name__ : int = new_eval[k]
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> str:
'''simple docstring'''
plt.step(_snake_case , _snake_case , color="b" , alpha=0.2 , where="post" )
plt.fill_between(_snake_case , _snake_case , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_snake_case )
plt.savefig(_snake_case )
plt.clf()
def lowerCAmelCase_ ( _snake_case : Dict , _snake_case : Any , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]=None , _snake_case : int=None ) -> str:
'''simple docstring'''
__magic_name__ : Union[str, Any] = sorted(_snake_case , key=lambda _snake_case : na_probs[k] )
__magic_name__ : Optional[int] = 0.0
__magic_name__ : str = 1.0
__magic_name__ : str = 0.0
__magic_name__ : List[str] = [1.0]
__magic_name__ : str = [0.0]
__magic_name__ : Optional[Any] = 0.0
for i, qid in enumerate(_snake_case ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__magic_name__ : List[str] = true_pos / float(i + 1 )
__magic_name__ : Any = true_pos / float(_snake_case )
if i == len(_snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_snake_case )
recalls.append(_snake_case )
if out_image:
plot_pr_curve(_snake_case , _snake_case , _snake_case , _snake_case )
return {"ap": 100.0 * avg_prec}
def lowerCAmelCase_ ( _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if out_image_dir and not os.path.exists(_snake_case ):
os.makedirs(_snake_case )
__magic_name__ : Any = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__magic_name__ : str = make_precision_recall_eval(
_snake_case , _snake_case , _snake_case , _snake_case , out_image=os.path.join(_snake_case , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
__magic_name__ : Union[str, Any] = make_precision_recall_eval(
_snake_case , _snake_case , _snake_case , _snake_case , out_image=os.path.join(_snake_case , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
__magic_name__ : str = {k: float(_snake_case ) for k, v in qid_to_has_ans.items()}
__magic_name__ : str = make_precision_recall_eval(
_snake_case , _snake_case , _snake_case , _snake_case , out_image=os.path.join(_snake_case , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(_snake_case , _snake_case , "pr_exact" )
merge_eval(_snake_case , _snake_case , "pr_f1" )
merge_eval(_snake_case , _snake_case , "pr_oracle" )
def lowerCAmelCase_ ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[Any] ) -> Dict:
'''simple docstring'''
if not qid_list:
return
__magic_name__ : Dict = [na_probs[k] for k in qid_list]
__magic_name__ : str = np.ones_like(_snake_case ) / float(len(_snake_case ) )
plt.hist(_snake_case , weights=_snake_case , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(_snake_case , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def lowerCAmelCase_ ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Dict ) -> List[Any]:
'''simple docstring'''
__magic_name__ : Union[str, Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__magic_name__ : List[str] = num_no_ans
__magic_name__ : Dict = cur_score
__magic_name__ : Dict = 0.0
__magic_name__ : Any = sorted(_snake_case , key=lambda _snake_case : na_probs[k] )
for i, qid in enumerate(_snake_case ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__magic_name__ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
__magic_name__ : List[Any] = -1
else:
__magic_name__ : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
__magic_name__ : Optional[int] = cur_score
__magic_name__ : List[Any] = na_probs[qid]
return 100.0 * best_score / len(_snake_case ), best_thresh
def lowerCAmelCase_ ( _snake_case : int , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ : List[str] = find_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case )
__magic_name__ , __magic_name__ : int = find_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case )
__magic_name__ : Optional[int] = best_exact
__magic_name__ : List[Any] = exact_thresh
__magic_name__ : Dict = best_fa
__magic_name__ : Any = fa_thresh
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
with open(OPTS.data_file ) as f:
__magic_name__ : Optional[Any] = json.load(_snake_case )
__magic_name__ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file ) as f:
__magic_name__ : Optional[Any] = json.load(_snake_case )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__magic_name__ : Any = json.load(_snake_case )
else:
__magic_name__ : Any = {k: 0.0 for k in preds}
__magic_name__ : str = make_qid_to_has_ans(_snake_case ) # maps qid to True/False
__magic_name__ : Tuple = [k for k, v in qid_to_has_ans.items() if v]
__magic_name__ : Optional[Any] = [k for k, v in qid_to_has_ans.items() if not v]
__magic_name__ , __magic_name__ : Union[str, Any] = get_raw_scores(_snake_case , _snake_case )
__magic_name__ : Optional[Any] = apply_no_ans_threshold(_snake_case , _snake_case , _snake_case , OPTS.na_prob_thresh )
__magic_name__ : Optional[Any] = apply_no_ans_threshold(_snake_case , _snake_case , _snake_case , OPTS.na_prob_thresh )
__magic_name__ : List[Any] = make_eval_dict(_snake_case , _snake_case )
if has_ans_qids:
__magic_name__ : int = make_eval_dict(_snake_case , _snake_case , qid_list=_snake_case )
merge_eval(_snake_case , _snake_case , "HasAns" )
if no_ans_qids:
__magic_name__ : List[Any] = make_eval_dict(_snake_case , _snake_case , qid_list=_snake_case )
merge_eval(_snake_case , _snake_case , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , OPTS.out_image_dir )
histogram_na_prob(_snake_case , _snake_case , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(_snake_case , _snake_case , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(_snake_case , _snake_case )
else:
print(json.dumps(_snake_case , indent=2 ) )
if __name__ == "__main__":
snake_case : int = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
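# Example invocation (a sketch: the exact argument names come from parse_args(),
# which is defined earlier in this file, so the flags below are assumptions
# modelled on the official SQuAD 2.0 evaluation script):
#
#   python evaluate.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir plots/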
| 281 | 1 |
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(
            model.decoder.tokenizer
        ),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
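# Minimal sketch of the fused-qkv split performed above: the original checkpoint
# stores query, key and value stacked along dim 0 of a single matrix, and the
# slices recover them. Shapes are illustrative only, not from a real checkpoint.
#
#   dim = 4
#   qkv_weight = torch.randn(3 * dim, dim)
#   q = qkv_weight[:dim, :]
#   k = qkv_weight[dim : dim * 2, :]
#   v = qkv_weight[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)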
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 96 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
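# For intuition, the first pair emitted for layer 0 should be (inferred from the
# append calls above, not from a traced run):
#
#   ("blocks.0.norm1.weight", "deit.encoder.layer.0.layernorm_before.weight")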
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
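# Example invocation (a sketch; the output path is a placeholder):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224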
| 148 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
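# Minimal usage sketch: the deprecated class behaves exactly like
# FlavaImageProcessor apart from emitting the FutureWarning above.
#
#   feature_extractor = FlavaFeatureExtractor()  # warns, then acts as an image processor
#   image_processor = FlavaImageProcessor()      # the preferred replacement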
| 4 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 221 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
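# Rough sketch of what add_hook_to_module wires around a module's forward (a
# simplification of accelerate's actual dispatch, shown for intuition only):
#
#   hook = PreForwardHook()
#   args, kwargs = hook.pre_forward(module, x)                # here: x -> x + 1
#   output = module._old_forward(*args, **kwargs)             # the original forward
#   output = PostForwardHook().post_forward(module, output)   # here: output -> output + 1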
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 221 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper around several `ControlNetModel` instances so they can be used together for
    Multi-ControlNet inference: the sub-controlnets are run sequentially and their
    residuals are summed.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with
        # `DiffusionPipeline.from_pretrained`; second, third, ... controlnets have to be saved under
        # `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
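# Minimal usage sketch (the hub ids below are illustrative assumptions):
#
#   controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi_controlnet = MultiControlNetModel([controlnet_canny, controlnet_pose])
#   # forward() then expects one conditioning image and one scale per sub-controlnet.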
| 1 | '''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
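# The jit test above relies on the standard JAX pattern below (a generic sketch,
# independent of any particular model):
#
#   import jax
#   import jax.numpy as jnp
#
#   @jax.jit
#   def double(x):
#       return x * 2
#
#   compiled = double(jnp.ones(3))       # traced and compiled on first call
#   with jax.disable_jit():
#       eager = double(jnp.ones(3))      # same function, executed eagerly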
| 1 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : List[str] ) ->Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : List[Any] ) ->Optional[int]:
lowerCamelCase__ : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=A )
lowerCamelCase__ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase__ : Any = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : int = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowerCamelCase__ : List[str] = 4_0_0_3_6_6_0_3_4_6
lowerCamelCase__ : List[str] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCamelCase__ : Dict = torch.manual_seed(A )
lowerCamelCase__ : List[Any] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = image[0, -3:, -3:, -1]
lowerCamelCase__ : Any = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCamelCase__ : str = torch.manual_seed(A )
lowerCamelCase__ : int = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase__ : Any = output.images
lowerCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : int = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self : Optional[int] ) ->Any:
lowerCamelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=A )
lowerCamelCase__ : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCamelCase__ : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : str = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowerCamelCase__ : int = 2_7_3_4_9_7_1_7_5_5
lowerCamelCase__ : List[str] = 7
lowerCamelCase__ : Dict = torch.manual_seed(A )
lowerCamelCase__ : Optional[int] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCamelCase__ : str = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Tuple = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCamelCase__ : Any = torch.manual_seed(A )
lowerCamelCase__ : List[Any] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase__ : Union[str, Any] = output.images
lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase__ : Optional[int] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self : List[str] ) ->int:
lowerCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowerCamelCase__ : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase__ : Any = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowerCamelCase__ : Tuple = 1_0_4_4_3_5_5_2_3_4
lowerCamelCase__ : Any = 1_2
lowerCamelCase__ : int = torch.manual_seed(A )
lowerCamelCase__ : Dict = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
lowerCamelCase__ : Optional[Any] = output.images
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Any = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(A )
lowerCamelCase__ : List[Any] = sd_pipe(
[prompt] , generator=A , guidance_scale=A , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCamelCase__ : Any = output.images
lowerCamelCase__ : List[Any] = image[0, -3:, -3:, -1]
lowerCamelCase__ : str = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 359 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about (and collect) deprecated arguments; raise once the removal version is reached."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
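
# A minimal usage sketch (not part of the original module; the names below are
# illustrative only): deprecating a keyword argument from inside a function.
#
#   def load(path, **kwargs):
#       scale = deprecate("scale", "0.30.0", "Use `rescale_factor` instead.", take_from=kwargs)
#       ...
#
# Calling `load("x", scale=2)` emits a FutureWarning and returns 2 for `scale`;
# any leftover unexpected kwargs raise a TypeError that points at the caller.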
| 265 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
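
# Usage sketch (assuming the script is saved as convert_to_fp16.py; Fire exposes
# the function's parameters as CLI arguments and flags):
#   python convert_to_fp16.py path/to/pytorch_model.bin --save_path model.fp16.bin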
| 325 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """
    Find the area of the grid whose number of rectangles is closest to ``target``.
    An a x b grid contains T(a) * T(b) rectangles, where T(n) is the n-th triangle
    number (cf. Project Euler problem 85).
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__UpperCAmelCase = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__UpperCAmelCase = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__UpperCAmelCase = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 360 |
def catalan(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed, so catalan(1) == 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
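
# Quick sanity check (illustrative): the first Catalan numbers.
# >>> [catalan(i) for i in range(1, 7)]
# [1, 1, 2, 5, 14, 42]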
| 42 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = ['input_features', 'attention_mask']
def __init__( self : int , __lowercase : int=80 , __lowercase : str=16000 , __lowercase : Optional[Any]=80 , __lowercase : List[Any]=0.0 , __lowercase : Union[str, Any]=True , __lowercase : Tuple=True , __lowercase : Union[str, Any]=True , **__lowercase : List[Any] , ) -> Tuple:
super().__init__(feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase )
__UpperCAmelCase : Any = num_mel_bins
__UpperCAmelCase : List[Any] = do_ceptral_normalize
__UpperCAmelCase : List[str] = normalize_means
__UpperCAmelCase : List[Any] = normalize_vars
__UpperCAmelCase : List[str] = True
def UpperCAmelCase ( self : int , __lowercase : np.ndarray , ) -> np.ndarray:
__UpperCAmelCase : Union[str, Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__UpperCAmelCase : Optional[Any] = torch.from_numpy(__lowercase ).unsqueeze(0 )
__UpperCAmelCase : List[str] = ta_kaldi.fbank(__lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def UpperCAmelCase ( __lowercase : np.ndarray , __lowercase : int , __lowercase : Optional[bool] = True , __lowercase : Optional[bool] = True , __lowercase : float = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
__UpperCAmelCase : str = x[:input_length].mean(axis=0 )
__UpperCAmelCase : int = np.subtract(__lowercase , __lowercase )
if normalize_vars:
__UpperCAmelCase : Union[str, Any] = x[:input_length].std(axis=0 )
__UpperCAmelCase : int = np.divide(__lowercase , __lowercase )
if input_length < x.shape[0]:
__UpperCAmelCase : Tuple = padding_value
# make sure array is in float32
__UpperCAmelCase : int = x.astype(np.floataa )
return x
def UpperCAmelCase ( self : Tuple , __lowercase : List[np.ndarray] , __lowercase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
__UpperCAmelCase : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowercase , __lowercase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowercase , __lowercase )
]
def __call__( self : int , __lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowercase : Union[bool, str, PaddingStrategy] = False , __lowercase : Optional[int] = None , __lowercase : bool = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , **__lowercase : int , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__UpperCAmelCase : List[str] = isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__UpperCAmelCase : Dict = is_batched_numpy or (
isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase : Any = [np.asarray(__lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowercase , np.ndarray ):
__UpperCAmelCase : int = np.asarray(__lowercase , dtype=np.floataa )
elif isinstance(__lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase : int = [raw_speech]
# extract fbank features
__UpperCAmelCase : Dict = [self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
# convert into correct format for padding
__UpperCAmelCase : List[Any] = BatchFeature({"""input_features""": features} )
__UpperCAmelCase : Optional[int] = self.pad(
__lowercase , padding=__lowercase , max_length=__lowercase , truncation=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , **__lowercase , )
# make sure list is in array format
__UpperCAmelCase : Optional[Any] = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __lowercase ):
__UpperCAmelCase : Tuple = [np.asarray(__lowercase , dtype=np.floataa ) for feature in input_features]
__UpperCAmelCase : Tuple = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__UpperCAmelCase : List[Any] = [np.asarray(__lowercase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__UpperCAmelCase : Tuple = (
np.array(__lowercase , dtype=np.intaa )
if self._get_padding_strategies(__lowercase , max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__UpperCAmelCase : str = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__lowercase )
if return_tensors is not None:
__UpperCAmelCase : List[str] = padded_inputs.convert_to_tensors(__lowercase )
return padded_inputs
| 114 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(timeit("euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()))
        print("With Numpy")
        print(timeit("euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals()))

    benchmark()
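
    # Quick sanity check (illustrative): the 3-4-5 right triangle.
    # >>> euclidean_distance([0, 0], [3, 4])
    # 5.0
    # >>> euclidean_distance_no_np([0, 0], [3, 4])
    # 5.0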
| 114 | 1 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
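
# Quick sanity check (illustrative): the primes below 10 are 2, 3, 5 and 7.
# >>> solution(10)
# 17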
if __name__ == "__main__":
print(f"""{solution() = }""") | 356 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A =logging.get_logger(__name__)
__A ={'vocab_file': 'spiece.model'}
__A ={
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class _snake_case ( a__ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<sep>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<cls>" , _lowerCamelCase="<mask>" , _lowerCamelCase=["<eop>", "<eod>"] , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase) if isinstance(_lowerCamelCase , _lowerCamelCase) else mask_token
UpperCAmelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
UpperCAmelCase__ : List[str] = 3
UpperCAmelCase__ : str = do_lower_case
UpperCAmelCase__ : Tuple = remove_space
UpperCAmelCase__ : List[str] = keep_accents
UpperCAmelCase__ : Any = vocab_file
UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowerCamelCase)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""")
UpperCAmelCase__ : Union[str, Any] = jieba
UpperCAmelCase__ : int = str.maketrans(""" \n""" , """\u2582\u2583""")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self):
return len(self.sp_model)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(_lowerCamelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
UpperCAmelCase__ : List[str] = self.__dict__.copy()
UpperCAmelCase__ : Optional[int] = None
return state
def __setstate__( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self , _lowerCamelCase):
if self.remove_space:
UpperCAmelCase__ : Tuple = """ """.join(inputs.strip().split())
else:
UpperCAmelCase__ : str = inputs
UpperCAmelCase__ : List[Any] = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""")
if not self.keep_accents:
UpperCAmelCase__ : str = unicodedata.normalize("""NFKD""" , _lowerCamelCase)
UpperCAmelCase__ : Any = """""".join([c for c in outputs if not unicodedata.combining(_lowerCamelCase)])
if self.do_lower_case:
UpperCAmelCase__ : Optional[int] = outputs.lower()
return outputs
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : List[str] = self.preprocess_text(_lowerCamelCase)
UpperCAmelCase__ : Dict = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase)
UpperCAmelCase__ : List[Any] = []
for piece in pieces:
if len(_lowerCamelCase) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit():
UpperCAmelCase__ : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , """"""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase__ : Optional[int] = cur_pieces[1:]
else:
UpperCAmelCase__ : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowerCamelCase)
else:
new_pieces.append(_lowerCamelCase)
return new_pieces
def snake_case__ ( self , _lowerCamelCase):
return self.sp_model.PieceToId(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
return self.sp_model.IdToPiece(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : str = """""".join(_lowerCamelCase).replace(_lowerCamelCase , """ """).strip()
return out_string
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase)
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase)) + [1] + ([0] * len(_lowerCamelCase)) + [1, 1]
return ([0] * len(_lowerCamelCase)) + [1, 1]
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None):
if not os.path.isdir(_lowerCamelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
UpperCAmelCase__ : str = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCamelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowerCamelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCamelCase , """wb""") as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase)
return (out_vocab_file,)
def snake_case__ ( self , *_lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = super()._decode(*_lowerCamelCase , **_lowerCamelCase)
UpperCAmelCase__ : Any = text.replace(""" """ , """""").replace("""\u2582""" , """ """).replace("""\u2583""" , """\n""")
return text | 283 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A( a , a , unittest.TestCase ):
snake_case_ = IFImgaImgSuperResolutionPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
snake_case_ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=0 ) -> int:
'''simple docstring'''
if str(_snake_case ).startswith('''mps''' ):
__a = torch.manual_seed(_snake_case )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
__a = floats_tensor((1, 3, 16, 16) , rng=random.Random(_snake_case ) ).to(_snake_case )
__a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , ) | 6 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` stays a regular field (not a ClassVar) so it shows up in `asdict` output.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # Class and attribute names reconstructed from context (rescale to [0, 1] and
    # pad to a multiple of `pad_size`, the pattern used by super-resolution processors).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
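
# A minimal usage sketch (illustrative values): a 17 x 21 RGB image is rescaled to
# [0, 1] and symmetrically padded up to the next multiple of 8 (24 x 24). Note that
# an exact multiple would still get a full extra block, since pad uses `// size + 1`.
#
#   processor = Swin2SRImageProcessor(pad_size=8)
#   image = np.random.randint(0, 256, (17, 21, 3), dtype=np.uint8)
#   pixel_values = processor(image, return_tensors="np")["pixel_values"]
#   assert pixel_values.shape == (1, 3, 24, 24)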
| 365 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"

    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0

    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
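
# Expected input format for --correct_filename (illustrative example), one entry
# per line, semicolon-separated: <file>;<class name>;<test name>;<line to write back>
#   tests/models/foo/test_modeling_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])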
| 141 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def lowerCAmelCase_ ( snake_case_ ):
_A : Any = test_results.split(""" """ )
_A : int = 0
_A : Union[str, Any] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_A : Union[str, Any] = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(snake_case_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def lowerCAmelCase_ ( snake_case_ ):
_A : Optional[Any] = {}
_A : Optional[int] = None
_A : Union[str, Any] = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""",snake_case_ ):
_A : Any = True
_A : Tuple = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_A : List[str] = line
_A : Optional[int] = False
return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # Reads the module-level `doc_test_results` dict populated in __main__.
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get("run_doctests")
_snake_case = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_snake_case = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact["stats"])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ", "
_snake_case = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_snake_case = line.replace("FAILED ", "")
_snake_case = line.split()[0].replace("\n", "")
if "::" in line:
_snake_case , _snake_case = line.split("::")
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else "N/A"
_snake_case = failure
break
_snake_case = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 26 |
def harmonic_series(n_term: str) -> list:
    """Build the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
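
    # Quick sanity check (illustrative):
    # >>> harmonic_series("5")
    # ['1', '1/2', '1/3', '1/4', '1/5']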
| 26 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = XLMTokenizer
__lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
a =dict(zip(__A , range(len(__A ) ) ) )
a =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__A ) )
def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict:
a ='''lower newer'''
a ='''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =XLMTokenizer(self.vocab_file , self.merges_file )
a ='''lower'''
a =['''low''', '''er</w>''']
a =tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
a =tokens + ['''<unk>''']
a =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
a =tokenizer.build_inputs_with_special_tokens(__A )
a =tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1] | 215 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # Average number of characters per token: a rough measure of how well the
    # tokenizer compresses this kind of text.
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt (can hit float error on very large ints)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search over integers."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
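

# An alternative sketch using math.isqrt (Python 3.8+): exact integer arithmetic,
# so it avoids the floating point error that math.sqrt can hit on very large ints.
def perfect_square_isqrt(num: int) -> bool:
    if num < 0:
        return False
    root = math.isqrt(num)  # floor of the exact integer square root
    return root * root == num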
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
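

# An O(1)-space alternative sketch (not part of the original snippet): Floyd's
# tortoise-and-hare cycle detection, instead of keeping a `visited` list
# (which costs O(n) memory and O(n^2) time).
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False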
| 53 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("""f(x) = x^3 + x^2""")
    print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 10_0000:
print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
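# Numerical sanity check (added note): because of the abs() in the loop, the routine
# computes the geometric area between the curve and the x axis (the integral of |f|),
# not the signed integral. For f(x) = x^3 + x^2 on [-5, 5] that exact area is
# 938/3 ≈ 312.667, which the printed values converge to as the step count grows.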
| 352 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
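# Tiny numeric sketch (added, not part of the original file): with two clusters,
# each flattened RGB pixel maps to the index of its nearest cluster.
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])
#   x = np.array([[10, 10, 10], [250, 250, 250]], dtype=np.float32)
#   color_quantize(x, clusters)  # -> array([0, 1])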
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes, normalizes and color-quantizes images into cluster indices.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale to [-1, 1]: divide by 127.5, then subtract 1.
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # parenthesized so both size and resample are checked when do_resize is set
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
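    # Usage sketch (added; the checkpoint name is an assumption):
    #   from PIL import Image
    #   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
    #   encoding = processor(images=Image.open("cat.png"), return_tensors="pt")
    #   encoding["input_ids"].shape  # (1, height * width) color-quantized tokens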
| 286 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__lowerCamelCase = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self) -> None:
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self) -> None:
        """Warm up the cache so the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self) -> None:
        env_vars_to_replace = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))

        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
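# Pattern note (added commentary, not in the original test): both tests in this file
# drive an argparse-based training script in-process by patching sys.argv, i.e. the
# minimal shape of the trick is:
#
#   with patch.object(sys, "argv", ["prog.py", "--flag", "value"]):
#       args = parser.parse_args()  # sees the patched argv
#
# which avoids spawning subprocesses and keeps the trained model object inspectable.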
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self) -> None:
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 1_28,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
['distillation.py']
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
f'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
# Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 221 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["ChineseCLIPFeatureExtractor"]
__lowerCamelCase = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221 | 1 |
"""Project Euler Problem 8: find the thirteen adjacent digits in the 1000-digit number that have the greatest product."""
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the number `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
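# Reference value (added note): for the standard Project Euler #8 input above and a
# window of 13 adjacent digits, the expected result is 23514624000.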
| 359 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
| 160 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type,
            block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # the block-sparse attention implementation does not return attention probabilities,
        # so attention outputs are skipped in the PT/Flax equivalence check
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
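# Commentary (added): test_jit_compilation above relies on jax.disable_jit() to run
# the same traced function eagerly; a minimal standalone version of the check:
#
#   import jax, jax.numpy as jnp
#   f = jax.jit(lambda x: x * 2)
#   with jax.disable_jit():
#       eager = f(jnp.ones(3))
#   assert (f(jnp.ones(3)) == eager).all()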
| 279 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Sum the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count starting numbers below `number_limit` whose chain of factorial digit sums has exactly `chain_length` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
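# Worked example (added note): 69 -> 363600 -> 1454 -> 169 -> 363601 (-> 1454 ...),
# so the chain starting at 69 has five distinct terms before it starts repeating.
# Per the Project Euler #74 statement, exactly 402 chains below one million contain
# sixty non-repeating terms.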
| 279 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter; the wrapped weights stay frozen during training."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
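        # Added commentary: the adapter is a rank-`rank` bottleneck (down-projection
        # then up-projection) whose up-projection is initialized to zero, so at step 0
        # the wrapped layer computes exactly module(input); training only has to learn
        # the low-rank residual, which is the usual LoRA warm start.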
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        # Saving a 4-bit quantized model is not supported, so this should raise.
        # (assumption: the expected exception type is NotImplementedError)
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()
    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 227 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
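    # Worked note (added): with break_key = 2 the cipher encrypts digraphs as
    # C = (K @ P) mod 36 over the 36-symbol alphabet A-Z0-9; decryption applies
    # K^{-1} mod 36, which exists iff gcd(det K, 36) == 1 — exactly the condition
    # check_determinant() enforces.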
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 227 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a UniSpeech model.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
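    # Added note: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the property
    # above evaluates to 5 * 2**6 = 320, i.e. one output frame per 320 input samples.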
| 127 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
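# Commentary (added): during training each sample keeps its residual branch with
# probability keep_prob and is scaled by 1/keep_prob, so the expected value of the
# output matches evaluation mode, where the branch is always kept (stochastic depth,
# Huang et al. 2016).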
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct Patch Embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with 1 group; input shape is (batch_size, num_channels, height, width)."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # token mixing: average pooling minus identity
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)

        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = config
# stochastic depth decay rule
snake_case = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case = nn.ModuleList(__snake_case )
# Transformer blocks
snake_case = []
snake_case = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__snake_case , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__snake_case ) )
snake_case = nn.ModuleList(__snake_case )
def a_ ( self , __snake_case , __snake_case=False , __snake_case=True ):
snake_case = () if output_hidden_states else None
snake_case = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case , snake_case = layers
# Get patch embeddings from hidden_states
snake_case = embedding_layer(__snake_case )
# Send the embeddings through the blocks
for _, blk in enumerate(__snake_case ):
snake_case = blk(__snake_case )
snake_case = layer_outputs[0]
if output_hidden_states:
snake_case = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = PoolFormerConfig
__magic_name__ = 'poolformer'
__magic_name__ = 'pixel_values'
__magic_name__ = True
def a_ ( self , __snake_case ):
if isinstance(__snake_case , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__snake_case , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def a_ ( self , __snake_case , __snake_case=False ):
if isinstance(__snake_case , __snake_case ):
snake_case = value
_SCREAMING_SNAKE_CASE : Optional[Any] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_SCREAMING_SNAKE_CASE : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , snake_case__ , )
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__(__snake_case )
snake_case = config
snake_case = PoolFormerEncoder(__snake_case )
# Initialize weights and apply final processing
self.post_init()
def a_ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self , __snake_case = None , __snake_case = None , __snake_case = None , ):
snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
snake_case = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , )
snake_case = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__snake_case , hidden_states=encoder_outputs.hidden_states , )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__()
snake_case = nn.Linear(config.hidden_size , config.hidden_size )
def a_ ( self , __snake_case ):
snake_case = self.dense(__snake_case )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , snake_case__ , )
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case ):
super().__init__(__snake_case )
snake_case = config.num_labels
snake_case = PoolFormerModel(__snake_case )
# Final norm
snake_case = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self , __snake_case = None , __snake_case = None , __snake_case = None , __snake_case = None , ):
snake_case = return_dict if return_dict is not None else self.config.use_return_dict
snake_case = self.poolformer(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , )
snake_case = outputs[0]
snake_case = self.classifier(self.norm(__snake_case ).mean([-2, -1] ) )
snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case = '''single_label_classification'''
else:
snake_case = '''multi_label_classification'''
if self.config.problem_type == "regression":
snake_case = MSELoss()
if self.num_labels == 1:
snake_case = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case = loss_fct(__snake_case , __snake_case )
elif self.config.problem_type == "single_label_classification":
snake_case = CrossEntropyLoss()
snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case = BCEWithLogitsLoss()
snake_case = loss_fct(__snake_case , __snake_case )
if not return_dict:
snake_case = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
| 127 | 1 |
'''simple docstring'''
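# Circle sort: compare and swap elements at mirrored positions (first vs. last,
# second vs. second-to-last, ...), recurse into both halves, and repeat whole
# passes until one completes with no swaps.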
def circle_sort(collection : list ):
    if len(collection ) < 2:
        return collection

    def circle_sort_util(collection : list , low : int , high : int ) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left] , collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left] , collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )

    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
| 55 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )

    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )

    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv so the training script sees the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]

    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
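# Example invocation (hypothetical script name and flags):
#   python xla_spawn.py --num_cores 8 my_training_script.py --train_batch_size 16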
| 55 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
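    # Note: unlike raw sacrebleu, `references` arrives here as one list of references per
    # prediction; `_compute` transposes it into reference streams before calling corpus_score.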
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]

        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 46 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig ):
    model_type = '''convbert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_6_8 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
| 32 | 0 |
"""simple docstring"""
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class BifidCipher:
    '''simple docstring'''

    def __init__(self ):
        self.SQUARE = np.array(SQUARE )

    def letter_to_numbers(self , letter: str ):
        # Row and column of the letter in the 5x5 square, 1-indexed.
        index_a , index_b = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index_a + 1, index_b + 1] )
        return indexes

    def numbers_to_letter(self , index_a: int , index_b: int ):
        letter = self.SQUARE[index_a - 1, index_b - 1]
        return letter

    def encode(self , message: str ):
        message = message.lower()
        message = message.replace(" " , "" )
        message = message.replace("j" , "i" )

        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ""
        for numbers_index in range(len(message ) ):
            index_a = int(second_step[numbers_index * 2] )
            index_b = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index_a , index_b )
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self , message: str ):
        message = message.lower()
        message.replace(" " , "" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ""
        for numbers_index in range(len(message ) ):
            index_a = int(second_step[0, numbers_index] )
            index_b = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index_a , index_b )
            decoded_message = decoded_message + letter

        return decoded_message
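# Minimal usage sketch (names as restored above; 'j' is folded into 'i', so j-free
# lowercase input should round-trip):
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"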
| 359 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split("." )

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                    old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f'''{attribute} is initialized.''' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f'''{attribute} is initialized''' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , "in_proj_weight" ):
                # The old fused in_proj matrix is sliced into query/key/value projections.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )

        if not is_key_init:
            raise ValueError(f'''{key} was not correctly initialized!''' )

    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 161 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt" , type=str , default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs" , type=int , default=5 )
    parser.add_argument("--batch_size" , type=int , default=6 )
    parser.add_argument("--gradient_accumulation_steps" , type=int , default=1 )
    parser.add_argument("--freeze" , type=bool , default=True )
    parser.add_argument("--learning_rate" , type=float , default=5e-4 )
    parser.add_argument("--seed" , type=int , default=0 )
    parser.add_argument("--lr_scheduler_type" , type=str , default="cosine" )
    parser.add_argument("--num_warmup_steps" , type=int , default=1_0 )
    parser.add_argument("--weight_decay" , type=float , default=0.01 )
    parser.add_argument("--output_dir" , type=str , default="./results" )
    return parser.parse_args()
metric = load('''accuracy''')
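# `eval_pred` is a (logits, labels) pair from the Trainer; the accuracy metric expects
# class ids, hence the argmax over the logits in `compute_metrics` below.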
def compute_metrics(eval_pred ):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback(TrainerCallback ):
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            # Also evaluate on the train split so train accuracy is logged each epoch.
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )

    dataset = load_dataset("codeparrot/codecomplex" , split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )

    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )

    def tokenize(example ):
        inputs = tokenizer(example["src"] , truncation=True , max_length=1_0_2_4 )
        label = labels.str2int(example["complexity"] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation["train"].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )

    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )

    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput ):
    # Assumption: restored from the obfuscated placeholder; images are PIL images or numpy arrays.
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline ):
    def __init__( self , prior: PriorTransformer , image_encoder: CLIPVisionModel , image_processor: CLIPImageProcessor , scheduler: HeunDiscreteScheduler , renderer: ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )

        # Scale the initial noise to the scheduler's expected sigma.
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        device = torch.device(f'cuda:{gpu_id}' )

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
    def _execution_device( self ):
        if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )

        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )

        image = image.to(dtype=self.image_encoder.dtype , device=device )

        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image , num_images_per_prompt = 1 , num_inference_steps = 25 , generator = None , latents = None , guidance_scale = 4.0 , frame_size = 64 , output_type = "pil" , return_dict = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )

        # prior

        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )

        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )

            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding

            # remove the variance
            noise_pred , _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                # classifier-free guidance: uncond + scale * (cond - uncond)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )

        images = []
        for i, latent in enumerate(latents ):
            images.append(
                self.renderer.decode(
                    latent[None, :] , device , size=frame_size , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) )

        images = torch.stack(images )

        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]

        # Offload last model to CPU
        if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images )
| 19 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig ):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig(OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse("1.12" )

    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction="inputs" , inverted_values_shape=True )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head

    @property
    def atol_for_validation( self ) -> float:
        return 1E-3
    def generate_dummy_inputs( self , tokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(BloomOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )

        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
| 362 |
"""simple docstring"""
def solution(n: int = 4000000 ) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
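# Worked example: solution(100) sums the even Fibonacci terms 2 + 8 + 34 = 44.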
if __name__ == "__main__":
print(F"""{solution() = }""") | 64 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback ):
    """simple docstring"""

    def __init__( self ):
        self.events = []

    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append('on_init_end' )

    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append('on_train_begin' )

    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append('on_train_end' )

    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_begin' )

    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_end' )

    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append('on_step_begin' )

    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append('on_step_end' )

    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append('on_evaluate' )

    def on_predict( self , args , state , control , **kwargs ):
        self.events.append('on_predict' )

    def on_save( self , args , state , control , **kwargs ):
        self.events.append('on_save' )

    def on_log( self , args , state , control , **kwargs ):
        self.events.append('on_log' )

    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append('on_prediction_step' )
@require_torch
class TrainerCallbackTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()

    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )

        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks ,)
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )

        # Order doesn't matter; compare classes and instances interchangeably.
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )

        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log' )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save' )
            expected_events.append('on_epoch_end' )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )

        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' , category=UserWarning )

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' ,)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
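

# Quick usage sketch (editor's addition): the defaults above correspond to the
# facebook/xglm-564M checkpoint; smaller hypothetical variants can be described
# by overriding individual fields, e.g.:
#
#   config = XGLMConfig(num_layers=12, d_model=768, attention_heads=12)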
| 98 | 1 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)**n / ((1 + r)**n - 1)
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
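

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition; the figures are hypothetical examples, not
# values from the original file). The helper implements the amortization
# formula A = p * r * (1 + r)**n / ((1 + r)**n - 1), with r the monthly rate
# and n the number of monthly payments.
# ---------------------------------------------------------------------------
def _demo_equated_monthly_installments() -> None:
    # 25_000 principal at an 8% annual rate over 15 years -> roughly 238.9/month
    emi = equated_monthly_installments(25_000, 0.08, 15)
    assert abs(emi - 238.9) < 0.5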
| 218 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
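    # Usage sketch (editor's addition; the paths below are hypothetical
    # placeholders). Converting only a pickled corpus, without TF weights,
    # would look like this:
    #
    # convert_transfo_xl_checkpoint_to_pytorch(
    #     tf_checkpoint_path="",
    #     transfo_xl_config_file="",
    #     pytorch_dump_folder_path="./transfo-xl-pt",
    #     transfo_xl_dataset_file="./data/cache.pkl",
    # )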
| 218 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 223 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
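

# Minimal illustration of trim_batch (editor's addition; the tensors are
# invented): padding columns shared by every row are dropped in one shot.
#
# >>> ids = torch.tensor([[5, 7, 0, 0], [3, 0, 0, 0]])
# >>> trim_batch(ids, pad_token_id=0)
# tensor([[5, 7],
#         [3, 0]])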
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
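

# Usage sketch (editor's addition; the paths and checkpoint are hypothetical):
#
# from torch.utils.data import DataLoader
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# train_ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=32)
# loader = DataLoader(train_ds, batch_size=8, collate_fn=train_ds.collate_fn)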
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 223 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 151 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
'''simple docstring'''
def __init__( self :Optional[Any] , a :Optional[Any] , a :Dict=1_3 , a :Tuple=7 , a :List[Any]=True , a :List[str]=True , a :List[Any]=True , a :Optional[Any]=True , a :Union[str, Any]=9_9 , a :int=3_2 , a :Optional[Any]=2 , a :List[str]=4 , a :Optional[Any]=3_7 , a :Union[str, Any]="gelu" , a :Optional[int]=0.1 , a :Dict=0.1 , a :Tuple=5_1_2 , a :Union[str, Any]=1_6 , a :int=2 , a :Any=0.02 , a :Union[str, Any]=False , a :int=True , a :str="None" , a :Union[str, Any]=3 , a :str=4 , a :List[Any]=None , ) -> Tuple:
__UpperCamelCase : Tuple = parent
__UpperCamelCase : List[str] = batch_size
__UpperCamelCase : Optional[Any] = seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Dict = use_input_mask
__UpperCamelCase : List[str] = use_token_type_ids
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Optional[Any] = vocab_size
__UpperCamelCase : Optional[Any] = hidden_size
__UpperCamelCase : Dict = num_hidden_layers
__UpperCamelCase : Any = num_attention_heads
__UpperCamelCase : str = intermediate_size
__UpperCamelCase : Union[str, Any] = hidden_act
__UpperCamelCase : Union[str, Any] = hidden_dropout_prob
__UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCamelCase : Tuple = max_position_embeddings
__UpperCamelCase : Tuple = type_vocab_size
__UpperCamelCase : Any = type_sequence_label_size
__UpperCamelCase : int = initializer_range
__UpperCamelCase : Dict = num_labels
__UpperCamelCase : Dict = num_choices
__UpperCamelCase : List[str] = relative_attention
__UpperCamelCase : Union[str, Any] = position_biased_input
__UpperCamelCase : Any = pos_att_type
__UpperCamelCase : Optional[Any] = scope
def _lowerCamelCase ( self :List[Any] ) -> List[Any]:
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : List[Any] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : List[str] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self :Optional[int] , a :int , a :List[Any] , a :Optional[int] , a :Union[str, Any] , a :Union[str, Any] , a :str , a :int ) -> Optional[int]:
__UpperCamelCase : List[str] = TFDebertaVaModel(config=a )
__UpperCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCamelCase : Optional[Any] = [input_ids, input_mask]
__UpperCamelCase : Optional[int] = model(a )
__UpperCamelCase : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self :str , a :List[Any] , a :Dict , a :Tuple , a :Union[str, Any] , a :str , a :Optional[int] , a :Optional[int] ) -> Optional[int]:
__UpperCamelCase : List[Any] = TFDebertaVaForMaskedLM(config=a )
__UpperCamelCase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self :List[Any] , a :Optional[int] , a :Optional[Any] , a :int , a :Optional[int] , a :Any , a :Dict , a :List[Any] ) -> Optional[int]:
__UpperCamelCase : Optional[int] = self.num_labels
__UpperCamelCase : int = TFDebertaVaForSequenceClassification(config=a )
__UpperCamelCase : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Optional[int] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self :Optional[Any] , a :int , a :Dict , a :Union[str, Any] , a :Tuple , a :Tuple , a :Union[str, Any] , a :str ) -> int:
__UpperCamelCase : Tuple = self.num_labels
__UpperCamelCase : str = TFDebertaVaForTokenClassification(config=a )
__UpperCamelCase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Union[str, Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self :List[str] , a :List[Any] , a :Union[str, Any] , a :List[str] , a :Union[str, Any] , a :Optional[Any] , a :Union[str, Any] , a :Tuple ) -> int:
__UpperCamelCase : List[Any] = TFDebertaVaForQuestionAnswering(config=a )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Tuple = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : Dict = TFDebertaVaModelTester(self )
__UpperCamelCase : int = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _lowerCamelCase ( self :Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self :Optional[int] ) -> List[Any]:
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _lowerCamelCase ( self :Optional[Any] ) -> str:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _lowerCamelCase ( self :Optional[Any] ) -> Dict:
__UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _lowerCamelCase ( self :Any ) -> Optional[Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
def _lowerCamelCase ( self :int ) -> int:
__UpperCamelCase : Tuple = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(a )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def _lowerCamelCase ( self :Optional[Any] ) -> Any:
pass
@slow
def _lowerCamelCase ( self :Any ) -> Optional[int]:
__UpperCamelCase : List[Any] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
__UpperCamelCase : List[Any] = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__UpperCamelCase : Optional[Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCamelCase : str = model(a , attention_mask=a )[0]
__UpperCamelCase : Optional[int] = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , a , atol=1E-4 ) | 151 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of every almost equilateral triangle with
    integral side lengths and area whose perimeter does not exceed max_perimeter.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F'''{solution() = }''')
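
# Editor's note (illustrative): the recurrence first yields the triangles
# 5-5-6 (perimeter 16, integral area 12) and 17-17-16 (perimeter 50, area 120),
# so solution(100) returns 16 + 50 = 66.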
| 159 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 250 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
A_ : Optional[int] ="\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
A_ : List[Any] ="\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
A_ : List[Any] ="\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Any ={
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] =[
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
A_ : Optional[int] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 | 0 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
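
# Example (editor's addition; directory names are placeholders): running
#   python utils/get_modified_files.py utils src tests examples
# prints a space-separated list such as "src/foo.py tests/test_foo.py" -- the
# .py files under those top-level dirs that changed since the fork point.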
| 22 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
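

# Usage sketch (editor's addition; assumes a running Ray cluster, and the
# checkpoint name is only an example):
#
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )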
| 49 | 0 |
from __future__ import annotations
def all_unique(nums: list[int]) -> bool:
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
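
# Usage sketch (editor's addition):
# all_unique([1, 2, 3]) -> True, all_unique([1, 2, 2]) -> False, because the
# set collapses duplicates and shortens the length.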
| 366 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function='relu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs) -> None:
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
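
# Usage sketch (added for illustration; the dimensions are arbitrary examples,
# not values taken from a released checkpoint):
if __name__ == "__main__":
    cfg = DecisionTransformerConfig(state_dim=11, act_dim=3, n_layer=2)
    print(cfg.hidden_size, cfg.n_positions)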
| 290 | 0 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor

class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent):
        '''simple docstring'''
        self.parent = parent

    def prepare_feat_extract_dict(self):
        '''simple docstring'''
        return {}

def get_html_strings():
    """simple docstring"""
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_1, html_string_2]

@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        '''simple docstring'''
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        '''simple docstring'''
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 293 |
from __future__ import annotations
import math
lowerCamelCase__ = """2020.9.26"""
lowerCamelCase__ = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
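
def demo_rotate_then_project() -> None:
    # Added usage sketch (not in the original file): rotate a point about the
    # y-axis, then perspective-project the rotated point with convert_to_2d.
    new_point = rotate(1.0, 2.0, 3.0, "y", 90.0)
    print(convert_to_2d(*new_point, scale=10.0, distance=10.0))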
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 212 | 0 |
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
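
# Quick check (added): a perimeter of 12 admits only the (3, 4, 5) triplet,
# whose product is 60.
assert solution(12) == 60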
if __name__ == "__main__":
print(F"""{solution() = }""")
| 363 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)

@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        requires_backends(self, 'torch')
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """simple docstring"""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        """simple docstring"""
        image = load_image(image)
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='pt')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None')
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        """simple docstring"""
        input_boxes = model_inputs.pop('input_boxes')
        is_last = model_inputs.pop('is_last')
        original_sizes = model_inputs.pop('original_sizes').tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['iou_scores']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores'))
            all_masks.extend(model_output.pop('masks'))
            all_boxes.append(model_output.pop('boxes'))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
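
# Usage sketch (added; this class backs the "mask-generation" pipeline task).
# It downloads a SAM checkpoint on first run, so it is kept behind a main guard.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64)
    print(len(outputs["masks"]))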
| 335 | 0 |
"""simple docstring"""
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """simple docstring"""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
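
# Sanity sketch (added): with exactly 13 digits there is a single window, so
# the answer is just the product of those digits.
assert solution("1111111111111") == 1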
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time

def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
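
# Worked example (added): three jobs arriving at t=0,1,2 with bursts 5,2,1.
# The shortest ready job is dispatched next, giving waits [0, 5, 3].
assert calculate_waitingtime([0, 1, 2], [5, 2, 1], 3) == [0, 5, 3]
assert calculate_turnaroundtime([5, 2, 1], 3, [0, 5, 3]) == [5, 7, 4]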
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 | 0 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
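
# Worked check (added): x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)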
| 363 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(""" """ + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"""{round(-1 * my_sec_sum):.1f}""")
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}""")
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
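
def demo_text_entropy() -> None:
    # Added usage sketch: prints H1, H2 and (H2 - H1) for a simple lowercase
    # pangram; the input must use only spaces and ascii lowercase letters.
    calculate_prob("the quick brown fox jumps over the lazy dog")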
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 212 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 313 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 313 | 1 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """simple docstring"""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
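
# Extra check (added): the classic example, "purple", can be built in exactly
# two ways from this word bank.
assert len(all_construct("purple", ["purp", "p", "ur", "le", "purpl"])) == 2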
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 369 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
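
# Quick checks (added): '.' matches any one character and 'x*' matches zero or
# more x's, mirroring the classic regex-matching DP exercise.
assert match_pattern("aa", "a*")
assert not match_pattern("mississippi", "mis*is*p*.")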
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A__ = '''aab'''
A__ = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 44 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(R"^\s*try:")
# Catches a line with else:
_re_else = re.compile(R"^\s*else:")
def find_backend(line) -> int:
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def __UpperCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A : List[str] = f.readlines()
A : int = 0
while line_index < len(_lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
A : Dict = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
A : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_lowerCAmelCase ):
A : int = _re_one_line_import_struct.search(_lowerCAmelCase ).groups()[0]
A : Dict = re.findall("""\[([^\]]+)\]""" , _lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
A : List[Any] = _re_import_struct_key_value.search(_lowerCAmelCase )
if single_line_import_search is not None:
A : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
A : int = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : Optional[Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
A : Tuple = lines[line_index]
if _re_import_struct_add_one.search(_lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_lowerCAmelCase ) is not None:
A : Tuple = _re_import_struct_add_many.search(_lowerCAmelCase ).groups()[0].split(""", """ )
A : List[Any] = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_between_brackets.search(_lowerCAmelCase ) is not None:
A : List[str] = _re_between_brackets.search(_lowerCAmelCase ).groups()[0].split(""", """ )
A : Optional[int] = [obj[1:-1] for obj in imports if len(_lowerCAmelCase ) > 0]
objects.extend(_lowerCAmelCase )
elif _re_quote_object.search(_lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(_lowerCAmelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
A : Any = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A : Dict = []
while (
line_index < len(_lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
A : Optional[Any] = lines[line_index]
A : int = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
A : int = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(_lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
A : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
A : Any = lines[line_index]
A : Union[str, Any] = _re_import.search(_lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
A : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects) -> Any:
    """simple docstring"""
    def find_duplicates(items):
        return [k for k, v in collections.Counter(items).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A : Tuple = []
for key in import_dict_objects.keys():
A : Optional[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
A : Union[str, Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A : Any = """base imports""" if key == """none""" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def __UpperCamelCase ( ) -> str:
"""simple docstring"""
A : Dict = []
for root, _, files in os.walk(_lowerCAmelCase ):
if "__init__.py" in files:
A : Dict = os.path.join(_lowerCAmelCase , """__init__.py""" )
A : List[str] = parse_init(_lowerCAmelCase )
if objects is not None:
A : Optional[int] = analyze_results(*_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
A : Optional[Any] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) > 0:
raise ValueError("""\n\n""".join(_lowerCAmelCase ) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : Any = []
for path, directories, files in os.walk(_lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(_lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
A : Optional[Any] = str((Path(_lowerCAmelCase ) / folder).relative_to(_lowerCAmelCase ) )
A : Any = short_path.replace(os.path.sep , """.""" )
submodules.append(_lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
A : List[str] = str((Path(_lowerCAmelCase ) / fname).relative_to(_lowerCAmelCase ) )
A : Tuple = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(_lowerCAmelCase )
return submodules
IGNORE_SUBMODULES = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
]
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
A : Any = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(_lowerCAmelCase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A : Any = spec.loader.load_module()
A : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_lowerCAmelCase ) > 0:
A : Dict = """\n""".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 116 |
from manim import *
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Union[str, Any] = Rectangle(height=0.5, width=0.5 )
A : Optional[int] = Rectangle(height=0.25, width=0.25 )
A : Optional[Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
A : List[str] = [mem.copy() for i in range(6 )]
A : Any = [mem.copy() for i in range(6 )]
A : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : str = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : List[Any] = Text("""CPU""", font_size=24 )
A : Optional[int] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
A : List[Any] = [mem.copy() for i in range(4 )]
A : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : Dict = Text("""GPU""", font_size=24 )
A : Any = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
A : Optional[int] = [mem.copy() for i in range(6 )]
A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : Optional[int] = Text("""Model""", font_size=24 )
A : List[Any] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
A : Tuple = []
A : Tuple = []
A : Any = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
A : Any = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=lowerCamelCase__, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=lowerCamelCase__, buff=0.0 )
self.add(lowerCamelCase__ )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ )
A : int = [mem.copy() for i in range(6 )]
A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : str = Text("""Loaded Checkpoint""", font_size=24 )
A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase__ )
A : Optional[int] = []
A : List[Any] = []
for i, rect in enumerate(lowerCamelCase__ ):
A : int = fill.copy().set_fill(lowerCamelCase__, opacity=0.7 )
target.move_to(lowerCamelCase__ )
ckpt_arr.append(lowerCamelCase__ )
A : List[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__, *lowerCamelCase__ )
A : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A : List[Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__, lowerCamelCase__ )
A : Union[str, Any] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=18, )
blue_text.next_to(lowerCamelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
A : List[str] = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''', font_size=24, )
step_a.move_to([2, 2, 0] )
A : List[str] = [meta_mem.copy() for i in range(6 )]
A : List[Any] = [meta_mem.copy() for i in range(6 )]
A : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : Dict = VGroup(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0 )
A : Optional[Any] = Text("""Disk""", font_size=24 )
A : List[str] = Group(lowerCamelCase__, lowerCamelCase__ ).arrange(lowerCamelCase__, buff=0.5, aligned_edge=lowerCamelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowerCamelCase__, run_time=3 ), Write(lowerCamelCase__, run_time=1 ), Create(lowerCamelCase__, run_time=1 ) )
A : str = []
for i, rect in enumerate(lowerCamelCase__ ):
A : Optional[Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase__, run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(FadeOut(lowerCamelCase__ ) )
A : List[str] = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''', font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__, run_time=3 ) )
self.play(
FadeOut(lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__, *lowerCamelCase__ ), )
self.wait()
| 116 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]

def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)

def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')

@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))

@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()

@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)

@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
| 260 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
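
# Example invocation (added; the paths below are placeholders, not real files):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_mobilebert.bin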
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 260 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 19 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = '''naver-clova-ix/donut-base-finetuned-docvqa'''
_A : Any = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
_A : Tuple = '''document_qa'''
_A : Dict = AutoProcessor
_A : Tuple = VisionEncoderDecoderModel
_A : Optional[int] = ['''image''', '''text''']
_A : Optional[int] = ['''text''']
def __init__( self : Any , *__a : List[str] , **__a : Any ) -> Optional[Any]:
"""simple docstring"""
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*__a , **__a )
def lowerCAmelCase ( self : List[Any] , __a : "Image" , __a : str ) -> List[str]:
"""simple docstring"""
__lowercase : int = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
__lowercase : str = task_prompt.replace("""{user_input}""" , __a )
__lowercase : Union[str, Any] = self.pre_processor.tokenizer(
__a , add_special_tokens=__a , return_tensors="""pt""" ).input_ids
__lowercase : int = self.pre_processor(__a , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowerCAmelCase ( self : Optional[int] , __a : int ) -> int:
"""simple docstring"""
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.pre_processor.batch_decode(__a )[0]
__lowercase : int = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
__lowercase : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
__lowercase : Optional[Any] = re.sub(r"""<.*?>""" , """""" , __a , count=1 ).strip() # remove first task start token
__lowercase : Dict = self.pre_processor.tokenajson(__a )
return sequence["answer"] | 233 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
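
    # A quick numeric spot-check (control points chosen for illustration): a
    # degree-1 curve is the straight segment between its two control points,
    # so t = 0.5 evaluates to their midpoint.
    print(BezierCurve([(1, 1), (3, 5)]).bezier_curve_function(0.5))  # -> (2.0, 3.0)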
| 55 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
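
    # A short usage sketch of the linked-list stack above (values arbitrary):
    stack = LinkedStack[int]()
    assert stack.is_empty()
    stack.push(1)
    stack.push(2)
    assert str(stack) == "2->1"
    assert stack.peek() == 2
    assert stack.pop() == 2
    assert len(stack) == 1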
| 55 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
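
# Illustrative note (not part of the original module): with the _LazyModule
# registered above, an import such as
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
# resolves names through _import_structure and only triggers the heavy
# submodule imports on first attribute access.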
| 253 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    # remap the original parameter names onto the diffusers names, in order
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # the value-function checkpoint is already a plain state dict
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
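

# After conversion, the dumped weights should be reloadable as a diffusers
# model; the directory below mirrors the dump locations used above (shown for
# illustration, any of the three output folders works the same way):
#   from diffusers import UNet1DModel
#   unet_hor32 = UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")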
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 163 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    # The fused qkv projection of the original checkpoint is split into
    # separate query/key/value weights. The target key names below follow the
    # usual ViT-style conversion; they are reconstructed here because the
    # source had lost the assignment targets.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
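    # Example invocation (illustrative; the dump folder is any local directory):
    #   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted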
| 362 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # occurs an odd number of times.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 67 | 0 |