| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    """
    Preprocess the data to avoid re-doing the (tokenization + token_to_ids) step several times.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end - start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]  # token ids fit in 16 bits
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
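# --- Editor's sketch (not in the original script): consuming the dump -------
# A minimal example of loading the pickle produced above; the file name
# assumes the default --dump_file and --tokenizer_name arguments.
import os

_dump_path = "data/dump.bert-base-uncased.pickle"
if os.path.exists(_dump_path):
    with open(_dump_path, "rb") as fp:
        sequences = pickle.load(fp)  # one array of token ids per input line
    print(f"{len(sequences)} binarized sequences; first ids: {sequences[0][:10]}")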
| 18 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
  title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
  author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
  booktitle={EMNLP},
  year={2016}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""

_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 344 | 0 |
"""simple docstring"""
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
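# --- Editor's sketch (not in the original): brute-force cross-check ---------
# Enumerates (outer_width, hole_width) laminae directly; only viable for
# small limits, but handy for validating `solution` above.
def brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina for this outer width
        hole = outer - 2  # widest hole with matching parity
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


assert brute_force(1000) == solution(1000)  # small-limit sanity check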
| 365 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 336 | 0 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Counts the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
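# --- Editor's note (not in the original): a small worked example ------------
# For n = 5 the distinct values of a**b with 2 <= a, b <= 5 are
# {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}:
# 15 terms, since 16 appears twice (as 2**4 and 4**2).
assert solution(5) == 15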
if __name__ == "__main__":
    print("Number of terms ", solution(int(input().strip())))
| 56 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 171 | 0 |
'''simple docstring'''
def binary_insertion_sort(collection):
    """Sorts a mutable collection in place using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
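# --- Editor's sketch (not in the original): a quick non-interactive check ---
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]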
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 9 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force over all 3-permutations: O(n^3)."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort, then use two pointers per fixed element: O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
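# --- Editor's sketch (not in the original): a tiny worked example -----------
# With the sorted array [5, 7, 13, 23, 29] and target 35, the two-pointer
# version fixes 5, then moves the pointers inward: 5 + 7 + 29 = 41 > 35,
# then 5 + 7 + 23 = 35 -> (5, 7, 23).
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)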
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 9 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a DETA model.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
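# --- Editor's sketch (not in the original): typical usage -------------------
# Within transformers this config would be consumed roughly as follows; the
# DetaModel import is an assumption based on model_type "deta":
#
#     from transformers import DetaConfig, DetaModel
#     config = DetaConfig()        # defaults to a ResNet stage2-4 backbone
#     model = DetaModel(config)
#     print(config.hidden_size)    # 256, aliased to d_model via attribute_map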
| 180 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 80 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
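# --- Editor's sketch (not in the original): how the counts are used ---------
# Downstream, a distillation trainer smooths these frequencies into
# token-masking probabilities; the 0.7 exponent below is an assumption
# mirroring the XLM/word2vec-style smoothing named in the parser description:
#
#     import numpy as np
#     token_probs = np.maximum(counts, 1) ** -0.7  # rarer tokens masked more often
#     token_probs = token_probs / token_probs.sum()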
| 324 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
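# --- Editor's sketch (not in the original): how a model test wires this in --
# A typical transformers test case instantiates the tester in setUp and calls
# run_common_tests; BertConfig and hidden_size=37 below are illustrative values.
#
#     from transformers import BertConfig
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()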
| 324 | 1 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    # Use accelerator.print to print only on the main process.
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
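# --- Editor's note (not in the original): typical invocations ---------------
# Single process:      python cross_validation.py --num_folds 3
# Distributed/mixed:   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16
# The script file name is an assumption; use whatever name this example is saved under.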
| 268 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high):
    """Finds the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high):
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size):
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes():
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
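# --- Editor's sketch (not in the original): a quick correctness check -------
# Classic CLRS example: the maximum subarray of the list below is
# arr[7..10] = (18, 20, -7, 12) with sum 43.
_clrs = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
assert max_subarray(_clrs, 0, len(_clrs) - 1) == (7, 10, 43)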
| 268 | 1 |
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder
def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fp16 = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 63 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = 13
_UpperCamelCase = 7
_UpperCamelCase = 30
_UpperCamelCase = self.seq_length + self.mem_len
_UpperCamelCase = 15
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = 99
_UpperCamelCase = [10, 50, 80]
_UpperCamelCase = 32
_UpperCamelCase = 32
_UpperCamelCase = 4
_UpperCamelCase = 8
_UpperCamelCase = 128
_UpperCamelCase = 2
_UpperCamelCase = 2
_UpperCamelCase = None
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = 3
_UpperCamelCase = self.vocab_size - 1
_UpperCamelCase = 0.01
def prepare_config_and_inputs( self : Dict) -> Optional[int]:
    """simple docstring"""
    input_ids_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
    input_ids_b = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
    lm_labels = None
    if self.use_labels:
        lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
    config = TransfoXLConfig(
        vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
    return (config, input_ids_a, input_ids_b, lm_labels)
def set_seed( self : Union[str, Any]) -> None:
    """simple docstring"""
    random.seed(self.seed)
    tf.random.set_seed(self.seed)
def create_and_check_transfo_xl_model( self , config , input_ids_a , input_ids_b , lm_labels) -> None:
    """simple docstring"""
    model = TFTransfoXLModel(config)
    hidden_states_a , mems_a = model(input_ids_a).to_tuple()
    inputs = {"input_ids": input_ids_b, "mems": mems_a}
    hidden_states_b , mems_b = model(inputs).to_tuple()
    self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(hidden_states_b.shape , (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertListEqual(
        [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    self.parent.assertListEqual(
        [mem.shape for mem in mems_b] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_lm_head( self , config , input_ids_a , input_ids_b , lm_labels) -> None:
    """simple docstring"""
    model = TFTransfoXLLMHeadModel(config)
    lm_logits_a , mems_a = model(input_ids_a).to_tuple()
    inputs = {"input_ids": input_ids_a, "labels": lm_labels}
    lm_logits_a , mems_a = model(inputs).to_tuple()
    lm_logits_b , mems_b = model([input_ids_b, mems_a]).to_tuple()
    inputs = {"input_ids": input_ids_b, "mems": mems_a, "labels": lm_labels}
    lm_logits_b , mems_b = model(inputs).to_tuple()
    self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertListEqual(
        [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    self.parent.assertEqual(lm_logits_b.shape , (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertListEqual(
        [mem.shape for mem in mems_b] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_a , input_ids_b , lm_labels) -> None:
    """simple docstring"""
    model = TFTransfoXLForSequenceClassification(config)
    result = model(input_ids_a)
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common( self : Dict) -> List[Any]:
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (config , input_ids_a , input_ids_b , lm_labels) = config_and_inputs
    inputs_dict = {"input_ids": input_ids_a}
    return config, inputs_dict
@require_tf
class _UpperCAmelCase ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
all_generative_model_classes = () if is_tf_available() else ()
pipeline_model_mapping = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
test_resize_embeddings = False
test_head_masking = False
test_onnx = False
test_mismatched_shapes = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name) -> bool:
    """simple docstring"""
    if pipeline_test_casse_name == "TextGenerationPipelineTests":
        # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
        # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
        # tokenizer.
        return True
    return False
def setUp( self : Optional[Any]) -> None:
    """simple docstring"""
    self.model_tester = TFTransfoXLModelTester(self)
    self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37)
def test_config( self : Dict) -> None:
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_transfo_xl_model( self : Union[str, Any]) -> None:
    """simple docstring"""
    self.model_tester.set_seed()
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)
def test_transfo_xl_lm_head( self : Optional[Any]) -> None:
    """simple docstring"""
    self.model_tester.set_seed()
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)
def test_transfo_xl_for_sequence_classification( self : List[str]) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
def test_model_common_attributes( self : Dict) -> None:
    """simple docstring"""
    config , _ = self.model_tester.prepare_config_and_inputs_for_common()
    list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
    for model_class in self.all_model_classes:
        model = model_class(config)
        assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
        if model_class in list_other_models_with_output_ebd:
            x = model.get_output_embeddings()
            assert isinstance(x , tf.keras.layers.Layer)
            name = model.get_bias()
            assert name is None
        else:
            x = model.get_output_embeddings()
            assert x is None
            name = model.get_bias()
            assert name is None
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
pass
@slow
def test_model_from_pretrained( self : List[str]) -> None:
    """simple docstring"""
    for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFTransfoXLModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
pass
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
output_ids = model.generate(input_ids , max_length=200 , do_sample=False)
self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids)
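# --- illustrative sketch, not part of the original test file ---
# Transformer-XL carries a memory of past hidden states: each forward pass
# returns `mems`, which are fed back in so the next segment can attend beyond
# its own length. A minimal loop over segments with a tiny, randomly
# initialised model (requires TensorFlow and `transformers`; the config values
# mirror the tester defaults above and are otherwise arbitrary):
import tensorflow as tf
from transformers import TransfoXLConfig, TFTransfoXLModel

def run_segments(num_segments: int = 3):
    config = TransfoXLConfig(vocab_size=99, d_model=32, d_embed=32, n_head=4,
                             d_head=8, d_inner=128, n_layer=2, mem_len=30,
                             cutoffs=[10, 50, 80])
    model = TFTransfoXLModel(config)
    mems = None
    for _ in range(num_segments):  # consecutive 7-token segments
        segment = tf.random.uniform((1, 7), maxval=config.vocab_size, dtype=tf.int32)
        outputs = model(segment, mems=mems)
        mems = outputs.mems  # cached states, reused by the next segment
    return mems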
| 63 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class A_ ( PreTrainedTokenizer ):
'''simple docstring'''
_UpperCamelCase : List[str] = ["input_ids"]
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
    # Mask token behaves like a normal word, i.e. it includes the space before it and
    # is included in the raw text; there should be a match in a non-normalized sentence.
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    super().__init__(
        do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.do_lower_case = do_lower_case
    self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(sentencepiece_model_ckpt )
    # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
    if vocab_file is not None:
        self.vocab = self.load_vocab(filepath=vocab_file )
    else:
        self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
    self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def get_offset_mapping( self , text ):
    if text is None:
        return None
    split_tokens = self.tokenize(text )
    normalized_text , char_mapping = '', []
    for i, ch in enumerate(text ):
        if ch in self.SP_CHAR_MAPPING:
            ch = self.SP_CHAR_MAPPING.get(ch )
        else:
            ch = unicodedata.normalize('NFKC' , ch )
        if self.is_whitespace(ch ):
            continue
        normalized_text += ch
        char_mapping.extend([i] * len(ch ) )
    text , token_mapping , offset = normalized_text, [], 0
    if self.do_lower_case:
        text = text.lower()
    for token in split_tokens:
        if token[:1] == "▁":
            token = token[1:]
        start = text[offset:].index(token ) + offset
        end = start + len(token )
        token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
        offset = end
    return token_mapping
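# --- illustrative sketch, not part of the original file ---
# The method above aligns sentencepiece tokens back to character spans of the
# (whitespace-normalised) input. The same idea on a toy whitespace tokenizer
# (`toy_offset_mapping` is a hypothetical helper for illustration only):
def toy_offset_mapping(text):
    mapping, offset = [], 0
    for token in text.split():
        start = text.index(token, offset)
        end = start + len(token)
        mapping.append((start, end))
        offset = end
    return mapping

assert toy_offset_mapping("hello world") == [(0, 5), (6, 11)]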
@property
def vocab_size( self ):
    return len(self.vocab )
def get_vocab( self ):
    return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
    state = self.__dict__.copy()
    state['sp_model'] = None
    return state
def __setstate__( self , d ):
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , 'sp_model_kwargs' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(self.sentencepiece_model_ckpt )
def clean_text( self , text ):
    return "".join(self.SP_CHAR_MAPPING.get(c , c ) for c in text )
def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
    if self.sp_model_kwargs.get('enable_sampling' ) is True:
        enable_sampling = True
    if self.sp_model_kwargs.get('alpha' ) is not None:
        alpha = self.sp_model_kwargs.get('alpha' )
    if self.sp_model_kwargs.get('nbest_size' ) is not None:
        nbest_size = self.sp_model_kwargs.get('nbest_size' )
    if not enable_sampling:
        pieces = self.sp_model.EncodeAsPieces(text )
    else:
        pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
    new_pieces = []
    for pi, piece in enumerate(pieces ):
        if piece == SPIECE_UNDERLINE:
            if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                new_pieces.append(SPIECE_UNDERLINE )
                continue
            else:
                continue
        lst_i = 0
        for i, chunk in enumerate(piece ):
            if chunk == SPIECE_UNDERLINE:
                continue
            if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                new_pieces.append(chunk )
                lst_i = i + 1
            elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
            elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
        if len(piece ) > lst_i:
            new_pieces.append(piece[lst_i:] )
    return new_pieces
def convert_tokens_to_string( self , tokens ):
    out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def convert_ids_to_string( self , ids ):
    tokens = self.convert_ids_to_tokens(ids )
    out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def _convert_token_to_id( self , token ):
    return self.vocab.get(token , self.vocab.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
    return self.reverse_vocab.get(index , self.unk_token )
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    _cls = [self.cls_token_id]
    _sep = [self.sep_token_id]
    return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
    if offset_mapping_1 is None:
        return [(0, 0)] + offset_mapping_0 + [(0, 0)]
    return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                'You should not supply a second sequence if the provided sequence of '
                'ids is already formatted with special tokens for the model.' )
        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
    if token_ids_1 is not None:
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    return [1] + ([0] * len(token_ids_0 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
    # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
    if token_ids_1 is None:
        # [CLS] X [SEP]
        return (len(token_ids_0 ) + 2) * [0]
    # [CLS] A [SEP] [SEP] B [SEP]
    return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
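# --- illustrative sketch, not part of the original file ---
# The segment-id layout above puts `[CLS] A` in segment 0 and `[SEP] [SEP] B [SEP]`
# in segment 1. A pure-Python check of that arithmetic (`toy_token_type_ids` is a
# hypothetical helper, not part of the tokenizer):
def toy_token_type_ids(len_a, len_b=None):
    if len_b is None:
        return (len_a + 2) * [0]                       # [CLS] A [SEP]
    return [0] * (len_a + 1) + [1] * (len_b + 3)       # [CLS] A | [SEP] [SEP] B [SEP]

assert toy_token_type_ids(3) == [0, 0, 0, 0, 0]
assert toy_token_type_ids(2, 2) == [0, 0, 0, 1, 1, 1, 1, 1]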
def is_ch_char( self , char ):
    if "\u4e00" <= char <= "\u9fff":
        return True
    return False
def is_alpha( self , char ):
    if ("a" <= char <= "z") or ("A" <= char <= "Z"):
        return True
    return False
def is_punct( self , char ):
    if char in ",;:.?!~,;:。?!《》【】":
        return True
    return False
def is_whitespace( self , char ):
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    if len(char ) == 1:
        cat = unicodedata.category(char )
        if cat == "Zs":
            return True
    return False
def load_vocab( self , filepath ):
    token_to_idx = {}
    with io.open(filepath , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            token = line.rstrip('\n' )
            token_to_idx[token] = int(index )
    return token_to_idx
def save_vocabulary( self , save_directory , filename_prefix = None ):
    index = 0
    if os.path.isdir(save_directory ):
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    else:
        vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
    with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
        for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
            if index != token_index:
                logger.warning(
                    F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                    ' Please check that the vocabulary is not corrupted!' )
                index = token_index
            writer.write(token + '\n' )
            index += 1
    tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model' )
    with open(tokenizer_model_file , 'wb' ) as fi:
        content_spiece_model = self.sp_model.serialized_model_proto()
        fi.write(content_spiece_model )
    return (vocab_file,)
| 195 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key ( key ):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '_'.join(pat.split('.' ) ) )
    return key
def rename_key_and_reshape_tensor ( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
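# --- illustrative sketch, not part of the original file ---
# PyTorch stores conv kernels as (out_ch, in_ch, kH, kW) while Flax expects
# (kH, kW, in_ch, out_ch); the (2, 3, 1, 0) transpose above performs exactly
# that relayout. Quick shape check with numpy:
import numpy as np

pt_kernel = np.zeros((64, 3, 7, 7))              # (out, in, kH, kW)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)    # -> (kH, kW, in, out)
assert flax_kernel.shape == (7, 7, 3, 64)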
def convert_pytorch_state_dict_to_flax ( pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('.' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 195 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
class _A ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = BartTokenizer
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
    """simple docstring"""
    super().__init__(
        vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
    pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
    if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
        pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
        pre_tok_state["""add_prefix_space"""] = add_prefix_space
        self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
    self.add_prefix_space = add_prefix_space
    # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
    tokenizer_component = """post_processor"""
    tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
    if tokenizer_component_instance:
        state = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
        if "sep" in state:
            state["""sep"""] = tuple(state["""sep"""] )
        if "cls" in state:
            state["""cls"""] = tuple(state["""cls"""] )
        changes_to_apply = False
        if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            state["""add_prefix_space"""] = add_prefix_space
            changes_to_apply = True
        if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
            state["""trim_offsets"""] = trim_offsets
            changes_to_apply = True
        if changes_to_apply:
            component_class = getattr(processors , state.pop("""type""" ) )
            new_value = component_class(**state )
            setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def mask_token( self ) -> str:
    """simple docstring"""
    if self._mask_token is None:
        if self.verbose:
            logger.error("""Using mask_token, but it is not set yet.""" )
        return None
    return str(self._mask_token )
@mask_token.setter
def mask_token( self , value ):
    """simple docstring"""
    value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
    self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ):
    """simple docstring"""
    is_split_into_words = kwargs.get("""is_split_into_words""" , False )
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            """to use it with pretokenized inputs.""" )
    return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ):
    """simple docstring"""
    is_split_into_words = kwargs.get("""is_split_into_words""" , False )
    if is_split_into_words and not self.add_prefix_space:
        raise ValueError(
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            """to use it with pretokenized inputs.""" )
    return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory , filename_prefix = None ):
    """simple docstring"""
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
    """simple docstring"""
    output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
    if token_ids_1 is None:
        return output
    return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
    """simple docstring"""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
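# --- illustrative sketch, not part of the original file ---
# `build_inputs_with_special_tokens` above wraps a single sequence as
# `<s> A </s>` and a pair as `<s> A </s> </s> B </s>` (note the doubled
# separator between the two segments). With toy ids bos=0, eos=2
# (`toy_build_inputs` is a hypothetical helper for illustration):
def toy_build_inputs(a, b=None, bos=0, eos=2):
    out = [bos] + a + [eos]
    return out if b is None else out + [eos] + b + [eos]

assert toy_build_inputs([7, 8]) == [0, 7, 8, 2]
assert toy_build_inputs([7], [9]) == [0, 7, 2, 2, 9, 2]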
| 368 | """simple docstring"""
def validate_initial_digits ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def luhn_validation ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number ( credit_card_number : str ) -> bool:
    '''simple docstring'''
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(f'{error_message} it fails the Luhn check.' )
        return False
    print(f'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 32 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    batch_size : int = 10000
    columns : Optional[List[str]] = None
    features : Optional[datasets.Features] = None
class Parquet ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
def _info( self ) -> datasets.DatasetInfo:
    return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager ):
    if not self.config.data_files:
        raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
    data_files = dl_manager.download_and_extract(self.config.data_files )
    if isinstance(data_files , (str, list, tuple) ):
        files = data_files
        if isinstance(files , str ):
            files = [files]
        # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
        files = [dl_manager.iter_files(file ) for file in files]
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
    splits = []
    for split_name, files in data_files.items():
        if isinstance(files , str ):
            files = [files]
        # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
        files = [dl_manager.iter_files(file ) for file in files]
        # Infer features if they are stored in the arrow schema
        if self.info.features is None:
            for file in itertools.chain.from_iterable(files ):
                with open(file , "rb" ) as f:
                    self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                break
        splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
    return splits
def _cast_table( self , pa_table : pa.Table ) -> pa.Table:
    if self.info.features is not None:
        # more expensive cast to support nested features with keys in a different order
        # allows str <-> int/float or str to Audio for example
        pa_table = table_cast(pa_table , self.info.features.arrow_schema )
    return pa_table
def _generate_tables( self , files ):
    schema = self.info.features.arrow_schema if self.info.features is not None else None
    if self.info.features is not None and self.config.columns is not None:
        if sorted(field.name for field in schema ) != sorted(self.config.columns ):
            raise ValueError(
                f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
    for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
        with open(file , "rb" ) as f:
            parquet_file = pq.ParquetFile(f )
            try:
                for batch_idx, record_batch in enumerate(
                    parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                    pa_table = pa.Table.from_batches([record_batch] )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
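# --- illustrative sketch, not part of the original builder ---
# The builder streams record batches instead of loading whole files; the same
# pattern with plain pyarrow (writes a tiny table first so the sketch runs;
# the path and helper name are arbitrary):
import pyarrow as pa
import pyarrow.parquet as pq

def stream_batches(path="/tmp/example.parquet", batch_size=2):
    pq.write_table(pa.table({"x": [1, 2, 3, 4, 5]}), path)
    parquet_file = pq.ParquetFile(path)
    for batch in parquet_file.iter_batches(batch_size=batch_size):
        yield pa.Table.from_batches([batch])  # one small Table per batch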
| 105 |
def get_set_bits_count ( number ) -> int:
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
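# --- illustrative sketch, not part of the original file ---
# `number &= number - 1` clears the lowest set bit, so the loop above runs once
# per set bit rather than once per bit position. Cross-check against bin():
def naive_count(n: int) -> int:
    return bin(n).count("1")

for n in (0, 1, 0b1011, 2**31 - 1):
    fast, number = 0, n
    while number:
        number &= number - 1  # drop lowest set bit
        fast += 1
    assert fast == naive_count(n)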
| 306 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( BaseOutput ):
    prev_sample : torch.FloatTensor
    pred_original_sample : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
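# --- illustrative sketch, not part of the original scheduler ---
# The cosine ("squaredcos_cap_v2") schedule produces betas in (0, max_beta] with
# alpha_bar decaying smoothly from ~1 to ~0. A quick numerical sanity check:
import math

def cosine_alpha_bar(t: float) -> float:
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

steps = 1000
betas = [min(1 - cosine_alpha_bar((i + 1) / steps) / cosine_alpha_bar(i / steps), 0.999)
         for i in range(steps)]
assert all(0 < b <= 0.999 for b in betas)
assert betas[-1] > betas[0]  # noise grows toward the end of the forward process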
class UnCLIPScheduler( SchedulerMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , variance_type : str = "fixed_small_log" , clip_sample : bool = True , clip_sample_range : Optional[float] = 1.0 , prediction_type : str = "epsilon" , beta_schedule : str = "squaredcos_cap_v2" , ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
def scale_model_input( self , sample : torch.FloatTensor , timestep : Optional[int] = None ) -> torch.FloatTensor:
    return sample
def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
    self.num_inference_steps = num_inference_steps
    step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
    timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
    self.timesteps = torch.from_numpy(timesteps ).to(device )
def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
    if prev_timestep is None:
        prev_timestep = t - 1
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    if prev_timestep == t - 1:
        beta = self.betas[t]
    else:
        beta = 1 - alpha_prod_t / alpha_prod_t_prev
    # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
    # and sample from it to get previous sample
    # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
    variance = beta_prod_t_prev / beta_prod_t * beta
    if variance_type is None:
        variance_type = self.config.variance_type
    # hacks - were probably added for training stability
    if variance_type == "fixed_small_log":
        variance = torch.log(torch.clamp(variance , min=1E-20 ) )
        variance = torch.exp(0.5 * variance )
    elif variance_type == "learned_range":
        # NOTE difference with DDPM scheduler
        min_log = variance.log()
        max_log = beta.log()
        frac = (predicted_variance + 1) / 2
        variance = frac * max_log + (1 - frac) * min_log
    return variance
def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prev_timestep : Optional[int] = None , generator=None , return_dict : bool = True , ):
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    if prev_timestep is None:
        prev_timestep = t - 1
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    if prev_timestep == t - 1:
        beta = self.betas[t]
        alpha = self.alphas[t]
    else:
        beta = 1 - alpha_prod_t / alpha_prod_t_prev
        alpha = 1 - beta
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if self.config.prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif self.config.prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(
            F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
            " for the UnCLIPScheduler." )
    # 3. Clip "predicted x_0"
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(
            pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
    current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        variance_noise = randn_tensor(
            model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
        variance = self._get_variance(
            t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
        if self.variance_type == "fixed_small_log":
            variance = variance
        elif self.variance_type == "learned_range":
            variance = (0.5 * variance).exp()
        else:
            raise ValueError(
                F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                " for the UnCLIPScheduler." )
        variance = variance * variance_noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
def add_noise( self , original_samples : torch.FloatTensor , noise : torch.FloatTensor , timesteps : torch.IntTensor , ) -> torch.FloatTensor:
    # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
    alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
    timesteps = timesteps.to(original_samples.device )
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
        sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
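# --- illustrative sketch, not part of the original scheduler ---
# One ancestral step above computes the posterior mean as a blend of the
# predicted x0 and the current sample (DDPM Eq. 7). The same coefficients in
# scalar form (`posterior_mean` is a hypothetical helper; the inputs are toy values):
import torch

def posterior_mean(x_t, x0_pred, alpha_prod_t, alpha_prod_t_prev, beta):
    beta_prod_t = 1 - alpha_prod_t
    alpha = 1 - beta
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
    coeff_xt = alpha ** 0.5 * (1 - alpha_prod_t_prev) / beta_prod_t
    return coeff_x0 * x0_pred + coeff_xt * x_t

mu = posterior_mean(torch.tensor(0.3), torch.tensor(1.0),
                    alpha_prod_t=0.5, alpha_prod_t_prev=0.6, beta=1 - 0.5 / 0.6)
print(float(mu))  # lies between the current sample and the predicted x0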
| 368 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    model_type = "vit_mae"
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
    super().__init__(**kwargs )
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.qkv_bias = qkv_bias
    self.decoder_num_attention_heads = decoder_num_attention_heads
    self.decoder_hidden_size = decoder_hidden_size
    self.decoder_num_hidden_layers = decoder_num_hidden_layers
    self.decoder_intermediate_size = decoder_intermediate_size
    self.mask_ratio = mask_ratio
    self.norm_pix_loss = norm_pix_loss
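# --- illustrative sketch, not part of the original config ---
# With the defaults above (224x224 images, 16x16 patches, mask_ratio=0.75) the
# MAE encoder only sees a quarter of the patch sequence:
image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2          # 196
num_visible = int(num_patches * (1 - mask_ratio))      # 49 patches reach the encoder
assert (num_patches, num_visible) == (196, 49)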
| 333 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys ( flax_key_tuple, flax_tensor ):
    '''simple docstring'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict ( layer, checkpoint_info, switch_checkpoint_path ):
    '''simple docstring'''
    if "metadata" in layer:
        split_layer = layer.split('''metadata''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''' )
        curr_real_layer_name = ''''''.join(split_layer[0] )[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
    else:
        split_layer = layer.split('''/''' )
        curr_real_layer_name = '''/'''.join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block ( current_block, save_path ):
    '''simple docstring'''
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''', '''.''' )] = v
    current_block = new_current_block
    torch.save(current_block, save_path )
def shard_on_the_fly ( switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name = WEIGHTS_NAME ):
    '''simple docstring'''
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''' ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info, sep='''/''' )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split('''/''' ) ), raw_weights )
        key = '''/'''.join(flax_key_tuple )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            rename_and_save_block(current_block, save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    rename_and_save_block(current_block, save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            '''.bin''', F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename, os.path.join(dump_path, shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME ), '''w''', encoding='''utf-8''' ) as f:
        content = json.dumps(index, indent=2, sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
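# --- illustrative sketch, not part of the original script ---
# The sharding above tracks a running byte count and flushes a block whenever
# adding the next tensor would exceed `max_shard_size`. The same accounting on
# toy tensors (float32 = 4 bytes per element; `plan_shards` is a hypothetical helper):
import torch

def plan_shards(tensors, max_bytes):
    shards, block, block_bytes = [], [], 0
    for name, t in tensors:
        size = t.numel() * t.element_size()
        if block and block_bytes + size > max_bytes:
            shards.append(block)
            block, block_bytes = [], 0
        block.append(name)
        block_bytes += size
    shards.append(block)
    return shards

weights = [("a", torch.zeros(100)), ("b", torch.zeros(100)), ("c", torch.zeros(100))]
assert plan_shards(weights, max_bytes=900) == [["a", "b"], ["c"]]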
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
args = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check ( ) -> None:
    '''simple docstring'''
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''' )
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''' )
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text, return_tensors='''pt''' ).input_ids
    out = model.generate(input_ids, decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 56 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple ( x : Union[str, Any] ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin :
def get_vision_text_model( self , vision_config , text_config ):
    """simple docstring"""
    pass
def prepare_config_and_inputs( self ):
    """simple docstring"""
    pass
def get_pretrained_model_and_inputs( self ):
    """simple docstring"""
    pass
def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
    """simple docstring"""
    config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
    model = TFVisionTextDualEncoderModel(config )
    output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
    self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
    self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def check_vision_text_dual_encoder_model( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
    """simple docstring"""
    vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
    model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
    output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
    self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
    self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
    """simple docstring"""
    vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
    kwargs = {"vision_model": vision_model, "text_model": text_model}
    model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
    output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
    self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
    self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
    """simple docstring"""
    vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
    model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
    output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
    out_1 = output[0].numpy()
    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname )
        model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
        after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_2 = after_output[0].numpy()
        max_diff = np.amax(np.abs(out_2 - out_1 ) )
        self.assertLessEqual(max_diff , 1E-5 )
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
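        # Worked example of the seq_len arithmetic above (hypothetical sizes, added for
        # clarity): image_size=(224, 224) with patch_size=(16, 16) gives
        # (224 // 16) * (224 // 16) = 196 patches, so seq_len = 196 + 1 = 197 with [CLS].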
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between outputs is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_pretrained_model_and_inputs(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 92 | 0 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel.

    >>> resistor_parallel([2.0, 2.0])
    1.0
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series.

    >>> resistor_series([3.0, 2.0, 3.0])
    8.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod() | 171 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=[10, 20, 30, 40] , __UpperCamelCase=[2, 2, 3, 2] , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=["stage2", "stage3", "stage4"] , __UpperCamelCase=3 , __UpperCamelCase=None , ) -> str:
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_backbone_config(self):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="UperNet does not have tied weights" )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
def binary_insertion_sort(collection: list) -> list:
    """Sorts a list in place, using binary search to find each insertion point.

    >>> binary_insertion_sort([5, 2, 4, 1, 3])
    [1, 2, 3, 4, 5]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
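# A quick property check (illustrative addition, not in the original module): the
# result must agree with Python's built-in sorted() on arbitrary input.
if __name__ == "__main__":
    import random

    sample = [random.randint(-100, 100) for _ in range(100)]
    assert binary_insertion_sort(list(sample)) == sorted(sample)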
| 9 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 9 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self ) -> List[str]:
return len(self.sp_model )
def snake_case__ ( self ) -> Any:
A__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[Any]:
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self ,__UpperCAmelCase ) -> str:
if self.remove_space:
A__ = ' '.join(inputs.strip().split() )
else:
A__ = inputs
A__ = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
A__ = unicodedata.normalize('NFKD' ,__UpperCAmelCase )
A__ = ''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
A__ = outputs.lower()
return outputs
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
A__ = self.preprocess_text(__UpperCAmelCase )
A__ = self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
A__ = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ = cur_pieces[1:]
else:
A__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def snake_case__ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> Optional[int]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
A__ = [self.sep_token_id]
A__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ = os.path.join(
__UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,'wb' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
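    # Illustrative usage (a hedged sketch; the checkpoint name comes from the vocab map
    # at the top of this file):
    #   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
    #   ids = tokenizer.encode("...")
    #   text = tokenizer.decode(ids)  # _decode() restores spaces and newlines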
| 154 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
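    # Net effect (illustrative): `from transformers.models.timesformer import TimesformerModel`
    # now resolves through _LazyModule, so modeling_timesformer (and its torch dependency)
    # is only imported on first attribute access instead of at package import time.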
| 154 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; the atom types correspond to
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain
    recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the given amino-acid sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` instance from model features and prediction output."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
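# Minimal end-to-end sketch (synthetic values; assumes atom_type_num == 37 as in
# AlphaFold's residue_constants — illustrative only):
#   n = 5
#   prot = Protein(
#       atom_positions=np.zeros((n, 37, 3)),
#       atom_mask=np.ones((n, 37)),
#       aatype=np.zeros((n,), dtype=np.int32),
#       residue_index=np.arange(n) + 1,
#       b_factors=np.zeros((n, 37)),
#   )
#   pdb_str = to_pdb(prot)  # headers, ATOM records, TER, END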
| 188 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 188 | 1 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable from items index..end within
    the remaining capacity max_weight.

    >>> knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0)
    9
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
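# A small worked instance (illustrative values, not from the original module): with
# weights [1, 3, 4, 5], values [1, 4, 5, 7] and capacity 5, the optimum takes the
# single item of weight 5 for a total value of 7.
if __name__ == "__main__":
    assert knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 5, 0) == 7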
| 85 | """simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
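# Typical call site (a hedged sketch, not from this file): guard the JIT build so
# CPU-only environments degrade gracefully.
#   try:
#       MSDA = load_cuda_kernels()
#   except Exception:
#       MSDA = None  # fall back to a pure-PyTorch attention path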
| 85 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 63 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
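# The float cube root above can drift for large (or even modest) n; an integer-exact
# variant, offered as an alternative sketch rather than the module's original function:
def perfect_cube_exact(n: int) -> bool:
    n = abs(n)
    root = round(n ** (1 / 3))
    # widen by one in each direction to absorb floating-point error
    return any(candidate**3 == n for candidate in (root - 1, root, root + 1))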
| 63 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ) -> List[Any]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width for the given inputs, assuming
        do_resize is True with a scalar shortest-edge size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
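# Worked example of the resize rule above (hypothetical input, for clarity): with
# shortest_edge=18, a 30x40 (h x w) image maps to height 18 and width
# int(18 * 40 / 30) = 24; batched inputs take the per-image maxima.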
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def _UpperCAmelCase ( self ) -> List[str]:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) | 153 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) over a = 3..n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
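# Worked check: solution(10) == 300, i.e. 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80.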
if __name__ == "__main__":
print(solution()) | 153 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
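# Note on make_linear_from_emb (sketch of the intent above): the Linear layer
# shares storage with emb.weight, so the LM head stays tied to the shared
# embedding matrix instead of holding an independent copy.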
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 195 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Widen the candidate pool geometrically until the index returns enough images.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate that the payload decodes to an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_images.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
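# Example invocation (hypothetical script name and paths), matching parse_args() below:
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./real_reg/cat --num_class_images 200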
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 195 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="https://github.com/krishnap25/mauve" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ,id="sequence" ),
"references": datasets.Value("string" ,id="sequence" ),
} ) ,codebase_urls=["https://github.com/krishnap25/mauve"] ,reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] ,)
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 355 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Approximates the area under the curve by summing the areas of the
    trapezoids formed over `steps` linear segments.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
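    # The printed estimates converge to the exact integral
    # ∫_{-5}^{5} (x^3 + x^2) dx = 250/3 ≈ 83.33; the odd x^3 term cancels
    # over the symmetric interval.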
| 142 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
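# Minimal usage sketch (the `Demo` class below is hypothetical): `register`
# rebuilds the class through the KeyHandler metaclass, so any method decorated
# with `mark`/`mark_multiple` becomes reachable from `handle_input` via its key code.
@register
class Demo:
    @mark(KEYMAP["up"])
    def on_up(self):
        return "moved up"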
| 58 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
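    # Downstream sketch (hedged): the distillation trainer typically turns these
    # counts into MLM masking probabilities with a smoothing exponent, e.g.
    #   token_probs = np.maximum(counts, 1) ** -0.7
    # so very frequent tokens are masked proportionally less often.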
| 58 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
def __init__( self : Any, __lowercase : int, __lowercase : int = 3, __lowercase : int = 1, __lowercase : int = 1, __lowercase : Optional[str] = "relu", **__lowercase : int, ):
super().__init__(**__lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase__ = tf.keras.layers.ConvaD(
filters=__lowercase, kernel_size=__lowercase, strides=__lowercase, padding="VALID", groups=__lowercase, use_bias=__lowercase, name="convolution", )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization" )
lowercase__ = ACTaFN[activation] if activation is not None else tf.identity
def A__ ( self : Optional[Any], __lowercase : str ):
lowercase__ = self.convolution(self.padding(__lowercase ) )
lowercase__ = self.normalization(__lowercase )
lowercase__ = self.activation(__lowercase )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
def __init__( self : Union[str, Any], __lowercase : RegNetConfig, **__lowercase : Tuple ):
super().__init__(**__lowercase )
lowercase__ = config.num_channels
lowercase__ = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )
def A__ ( self : List[str], __lowercase : int ):
lowercase__ = shape_list(__lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase__ = tf.transpose(__lowercase, perm=(0, 2, 3, 1) )
lowercase__ = self.embedder(__lowercase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
def __init__( self : Optional[int], __lowercase : int, __lowercase : int = 2, **__lowercase : Union[str, Any] ):
super().__init__(**__lowercase )
lowercase__ = tf.keras.layers.ConvaD(
filters=__lowercase, kernel_size=1, strides=__lowercase, use_bias=__lowercase, name="convolution" )
lowercase__ = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization" )
def A__ ( self : List[str], __lowercase : tf.Tensor, __lowercase : bool = False ):
return self.normalization(self.convolution(__lowercase ), training=__lowercase )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self : Tuple, __lowercase : int, __lowercase : int, **__lowercase : Optional[int] ):
super().__init__(**__lowercase )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase, name="pooler" )
lowercase__ = [
tf.keras.layers.ConvaD(filters=__lowercase, kernel_size=1, activation="relu", name="attention.0" ),
tf.keras.layers.ConvaD(filters=__lowercase, kernel_size=1, activation="sigmoid", name="attention.2" ),
]
def A__ ( self : List[str], __lowercase : List[str] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase__ = self.pooler(__lowercase )
for layer_module in self.attention:
lowercase__ = layer_module(__lowercase )
lowercase__ = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self : Optional[int], __lowercase : RegNetConfig, __lowercase : int, __lowercase : int, __lowercase : int = 1, **__lowercase : Union[str, Any] ):
super().__init__(**__lowercase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1, out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(__lowercase, stride=__lowercase, name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear", name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ = [
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=config.hidden_act, name="layer.0" ),
TFRegNetConvLayer(
__lowercase, stride=__lowercase, groups=__lowercase, activation=config.hidden_act, name="layer.1" ),
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=__lowercase, name="layer.2" ),
]
lowercase__ = ACTaFN[config.hidden_act]
def A__ ( self : str, __lowercase : Tuple ):
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(__lowercase )
lowercase__ = self.shortcut(__lowercase )
hidden_state += residual
lowercase__ = self.activation(__lowercase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self : str, __lowercase : RegNetConfig, __lowercase : int, __lowercase : int, __lowercase : int = 1, **__lowercase : Optional[Any] ):
super().__init__(**__lowercase )
lowercase__ = in_channels != out_channels or stride != 1
lowercase__ = max(1, out_channels // config.groups_width )
lowercase__ = (
TFRegNetShortCut(__lowercase, stride=__lowercase, name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear", name="shortcut" )
)
lowercase__ = [
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=config.hidden_act, name="layer.0" ),
TFRegNetConvLayer(
__lowercase, stride=__lowercase, groups=__lowercase, activation=config.hidden_act, name="layer.1" ),
TFRegNetSELayer(__lowercase, reduced_channels=int(round(in_channels / 4 ) ), name="layer.2" ),
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=__lowercase, name="layer.3" ),
]
lowercase__ = ACTaFN[config.hidden_act]
def A__ ( self : Any, __lowercase : List[Any] ):
lowercase__ = hidden_state
for layer_module in self.layers:
lowercase__ = layer_module(__lowercase )
lowercase__ = self.shortcut(__lowercase )
hidden_state += residual
lowercase__ = self.activation(__lowercase )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self : List[str], __lowercase : RegNetConfig, __lowercase : int, __lowercase : int, __lowercase : int = 2, __lowercase : int = 2, **__lowercase : Optional[Any] ):
super().__init__(**__lowercase )
lowercase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowercase__ = [
# downsampling is done in the first layer with stride of 2
layer(__lowercase, __lowercase, __lowercase, stride=__lowercase, name="layers.0" ),
*[layer(__lowercase, __lowercase, __lowercase, name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def A__ ( self : str, __lowercase : Tuple ):
for layer_module in self.layers:
lowercase__ = layer_module(__lowercase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self : Optional[int], __lowercase : RegNetConfig, **__lowercase : Union[str, Any] ):
super().__init__(**__lowercase )
lowercase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowercase, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) )
lowercase__ = zip(config.hidden_sizes, config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase, config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowercase, __lowercase, __lowercase, depth=__lowercase, name=F'''stages.{i+1}''' ) )
def A__ ( self : Optional[Any], __lowercase : tf.Tensor, __lowercase : bool = False, __lowercase : bool = True ):
lowercase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
lowercase__ = stage_module(__lowercase )
if output_hidden_states:
lowercase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase, hidden_states=__lowercase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self : str, __lowercase : Dict, **__lowercase : str ):
super().__init__(**__lowercase )
lowercase__ = config
lowercase__ = TFRegNetEmbeddings(__lowercase, name="embedder" )
lowercase__ = TFRegNetEncoder(__lowercase, name="encoder" )
lowercase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase, name="pooler" )
@unpack_inputs
def A__ ( self : Any, __lowercase : tf.Tensor, __lowercase : Optional[bool] = None, __lowercase : Optional[bool] = None, __lowercase : bool = False, ):
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.embedder(__lowercase, training=__lowercase )
lowercase__ = self.encoder(
__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase )
lowercase__ = encoder_outputs[0]
lowercase__ = self.pooler(__lowercase )
# Change to NCHW output format have uniformity in the modules
lowercase__ = tf.transpose(__lowercase, perm=(0, 3, 1, 2) )
lowercase__ = tf.transpose(__lowercase, perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ = tuple([tf.transpose(__lowercase, perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase, pooler_output=__lowercase, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature(self):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.floataa )}
REGNET_START_DOCSTRING = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
def A__ ( self : Union[str, Any], __lowercase : tf.Tensor, __lowercase : Optional[bool] = None, __lowercase : Optional[bool] = None, __lowercase : List[str]=False, ):
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
pixel_values=__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def A__ ( self : Union[str, Any], __lowercase : tf.Tensor = None, __lowercase : tf.Tensor = None, __lowercase : bool = None, __lowercase : bool = None, __lowercase : Dict=False, ):
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = self.regnet(
__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase )
lowercase__ = outputs.pooler_output if return_dict else outputs[1]
lowercase__ = self.classifier[0](__lowercase )
lowercase__ = self.classifier[1](__lowercase )
lowercase__ = None if labels is None else self.hf_compute_loss(labels=__lowercase, logits=__lowercase )
if not return_dict:
lowercase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase, logits=__lowercase, hidden_states=outputs.hidden_states )
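# Hedged usage sketch (comments only; "facebook/regnet-y-040" is the checkpoint
# named in the docstrings above, the processor class is an assumption):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1))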
| 224 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
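# Hedged usage sketch for the ONNX config defined below (comments only; the
# checkpoint is the one referenced in the archive map above):
#   tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   list(dummy)  # -> ["inputs", "attention_mask"]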
class PerceiverOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
@property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 224 | 1 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
    """A CLI menu for selecting one choice from a list using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
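# Hedged usage sketch (interactive; requires a real terminal):
#   menu = BulletMenu("Which device?", ["cpu", "cuda:0"])
#   index = menu.run(default_choice=0)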
| 176 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
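# Hedged sketch of the special-token layouts produced above (comments only):
#   single sequence: <s> A </s>
#   pair:            <s> A </s> </s> B </s>
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   tok.build_inputs_with_special_tokens([10, 11]) == [tok.bos_token_id, 10, 11, tok.eos_token_id]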
| 176 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Approximates the area under the curve by summing the areas of the
    trapezoids formed over `steps` linear segments.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 236 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
if "large" in checkpoint_url:
_UpperCamelCase : Any = 1_0_2_4
_UpperCamelCase : List[Any] = 4_0_9_6
_UpperCamelCase : List[str] = 2_4
_UpperCamelCase : Tuple = 1_6
_UpperCamelCase : Union[str, Any] = [5, 1_1, 1_7, 2_3]
_UpperCamelCase : Any = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
_UpperCamelCase : Optional[int] = 7_6_8
_UpperCamelCase : Optional[Any] = [1, 1, 1, 0.5]
_UpperCamelCase : List[Any] = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
_UpperCamelCase : Optional[int] = 1_5_0
_UpperCamelCase : Tuple = 1_6
        expected_shape = (1, 384, 384)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[int] = 'project'
if "ade" in checkpoint_url:
_UpperCamelCase : Dict = True
_UpperCamelCase : Dict = 7_6_8
_UpperCamelCase : Union[str, Any] = [1, 1, 1, 0.5]
_UpperCamelCase : Union[str, Any] = 1_5_0
_UpperCamelCase : str = 1_6
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
_UpperCamelCase : List[str] = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_UpperCamelCase : int = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_UpperCamelCase : Union[str, Any] = name.replace('bn' , 'batch_norm' )
if "head" in name:
_UpperCamelCase : Dict = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_UpperCamelCase : str = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_UpperCamelCase : Any = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_UpperCamelCase : List[Any] = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_UpperCamelCase : Dict = name.replace('..' , '.' )
if "stem.conv" in name:
_UpperCamelCase : Tuple = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_UpperCamelCase : Optional[int] = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_UpperCamelCase : List[str] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_UpperCamelCase : Union[str, Any] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_UpperCamelCase : Dict = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_UpperCamelCase : str = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_UpperCamelCase : Tuple = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
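def _demo_refinenet_mapping():
    # Editor's sketch (not part of the original script): `abs(layer_idx - 4)`
    # implements the 4 -> 0, 3 -> 1, 2 -> 2, 1 -> 3 mapping described above.
    assert [abs(i - 4) for i in (4, 3, 2, 1)] == [0, 1, 2, 3]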
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
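def _demo_qkv_split():
    # Editor's sketch (not part of the original script): a fused qkv projection
    # of shape (3 * hidden, hidden) splits into equal query/key/value blocks
    # along dim 0, mirroring the slicing in read_in_q_k_v. `hidden` is a
    # made-up size for illustration.
    hidden = 4
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : hidden * 2, :], qkv[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)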
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'Saving model to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 236 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
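def _demo_squeeze():
    # Editor's sketch (not part of the original script): squeeze_() drops all
    # singleton dimensions in place, e.g. (8, 4, 1, 1) -> (8, 4), which is why
    # the projection weights handled above end up as plain 2D matrices.
    # The shape used here is made up for illustration.
    w = torch.zeros(8, 4, 1, 1)
    assert w.squeeze_().shape == (8, 4)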
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 96 |
class PrefixSum:
    '''simple docstring'''
    def __init__( self , array : list[int] ):
        '''simple docstring'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ):
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
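def _demo_prefix_sum():
    # Editor's sketch (not in the original file): a subarray summing to
    # `target_sum` exists iff some prefix sum minus an earlier prefix sum
    # (or 0) equals the target, which is exactly what contains_sum checks.
    ps = PrefixSum([1, 7, 3])
    assert ps.get_sum(1, 2) == 10  # 7 + 3
    assert ps.contains_sum(8)  # the subarray [1, 7]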
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["CLIPFeatureExtractor"]
__A = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 348 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['sentencepiece'] )
| 348 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file ):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """ )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path ):
    '''simple docstring'''
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog ):
    '''simple docstring'''
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image ):
    '''simple docstring'''
    with open(csv_file_with_image , encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label ):
    '''simple docstring'''
    with open(csv_file_with_label , encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list ):
    '''simple docstring'''
    csv = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 43 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """ctrl"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 43 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = '''bert-base-cased'''
FP16 = '''fp16'''
BF16 = '''bf16'''
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration( AccelerateTestCase ):
    def setUp( self ):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
    def test_sharding_strategy( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = F'''{i + 1}'''
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
    def test_backward_prefetch( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
    def test_state_dict_type( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
    def test_auto_wrap_policy( self ):
        model = AutoModel.from_pretrained(BERT_BASE_CASED )
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env ):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception ) as cm:
                fsdp_plugin.set_auto_wrap_policy(model )
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env ):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env ):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy )
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(True )
    def test_cpu_offload( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag ).lower()
            with mockenv_context(**env ):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest( TempDirTestCase ):
    def setUp( self ):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 32_00,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 20_00,
            "fsdp_full_shard_transformer_based_wrap_fp16": 19_00,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        self.n_train = 1_60
        self.n_val = 1_60
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
    def test_performance( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_performance.py" )
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
                if strategy.lower() in config:
                    cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no" )
            else:
                cmd_config.append("--mixed_precision=fp16" )
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True" )
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    F'''--output_dir={self.tmpdir}''',
                    F'''--performance_lower_bound={self.performance_lower_bound}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
            cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        F'''--output_dir={self.tmpdir}''',
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , "epoch_0" )
                cmd_config.extend(
                    [
                        F'''--resume_from_checkpoint={resume_from_checkpoint}''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"] )
            else:
                cmd_config.extend(["--mixed_precision=no"] )
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
                    if strategy.lower() in spec:
                        cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True" )
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    F'''--output_dir={self.tmpdir}''',
                    F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
                    F'''--n_train={self.n_train}''',
                    F'''--n_val={self.n_val}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
| 333 | def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
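def _demo_recurring_cycle():
    # Editor's check (hedged, not in the original file): among denominators up
    # to 10, 1/7 = 0.(142857) has the longest recurring cycle (6 digits), so
    # the search above should return 7 for that range.
    assert solution(1, 10) == 7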
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total
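# Editor's check (not in the original file): with first_term=1, common_diff=1,
# num_of_terms=10 the closed form gives 10 / 2 * (2 + 9) = 55.0, i.e. the sum
# 1 + 2 + ... + 10, which is what main() prints below.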
def main() -> None:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
from math import factorial
def lowerCAmelCase_ ( __UpperCAmelCase: int , __UpperCAmelCase: int ) -> int:
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(__UpperCAmelCase ) // (factorial(__UpperCAmelCase ) * factorial(n - k ))
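# Editor's check (not in the original file): combinations(52, 5) == 2_598_960,
# the familiar count of five-card poker hands printed below.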
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 201 | 1 |
'''simple docstring'''
class Graph:
    def __init__( self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        """simple docstring"""
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('''\n''' )
    def get_edges( self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind(object):
        def __init__( self ):
            """simple docstring"""
            self.parent = {}
            self.rank = {}
        def __len__( self ):
            """simple docstring"""
            return len(self.parent )
        def make_set( self , item ):
            """simple docstring"""
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item1 , item2 ):
            """simple docstring"""
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst( graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
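def _demo_boruvka():
    # Editor's sketch (not in the original file): Boruvka's MST on a 4-cycle
    # with one diagonal must keep the three cheapest edges (total weight 6).
    # Edge weights are already distinct, so distinct_weight() is not needed.
    g = Graph.build(edges=[[0, 1, 1], [1, 2, 2], [2, 3, 3], [3, 0, 4], [0, 2, 5]])
    mst = Graph.boruvka_mst(g)
    # adjacency stores each edge in both directions, so halve the summed weight
    assert sum(weight for _, _, weight in mst.get_edges()) / 2 == 1 + 2 + 3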
| 237 | '''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , *, clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 7_68 , time_embed_dim : int , cross_attention_dim , ):
        """simple docstring"""
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        """simple docstring"""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
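def _demo_extra_tokens_reshape():
    # Editor's sketch (not part of the original module): the reshape + permute
    # in forward() turns a (batch, n_tokens * dim) projection into
    # (batch, n_tokens, dim) extra context tokens. Sizes here are made up.
    batch, n_tokens, dim = 2, 4, 8
    x = torch.zeros(batch, n_tokens * dim)
    x = x.reshape(batch, -1, n_tokens).permute(0, 2, 1)
    assert x.shape == (batch, n_tokens, dim)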
| 237 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor( CLIPImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 128 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
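# A minimal usage sketch (not part of the generated file): the builder calls
# above inject ModelProto (and the other message classes) into this module's
# globals at import time, so a serialized SentencePiece model can be
# inspected. The file path below is an illustrative assumption.
if __name__ == "__main__":
    proto = ModelProto()  # defined at runtime by _builder above
    with open("spiece.model", "rb") as f:
        proto.ParseFromString(f.read())
    print(proto.trainer_spec.model_type, len(proto.pieces))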
| 128 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
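# A minimal standalone sketch (not part of the test file): the same greedy
# generation the first slow test asserts on, runnable outside unittest.
# Assumes TF is installed and the facebook/xglm-564M weights are reachable.
if __name__ == "__main__":
    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    lm = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    ids = tok("The dog", return_tensors="tf").input_ids
    out = lm.generate(ids, do_sample=False, num_beams=1, max_new_tokens=17)
    print(tok.decode(out[0], skip_special_tokens=True))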
| 35 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
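# A minimal usage sketch (not part of the original file): composing a config
# from explicit backbone and decoder configs. The Swin depths are illustrative.
if __name__ == "__main__":
    backbone = SwinConfig(depths=[2, 2, 6, 2])
    decoder = DetrConfig()
    maskformer_config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
    print(maskformer_config.model_type, maskformer_config.num_hidden_layers)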
| 35 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the train/eval dataloaders for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
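# A small illustrative sketch (not part of the original script): with gradient
# accumulation, one optimizer step effectively sees
#     per_device_batch_size * num_processes * gradient_accumulation_steps
# examples. The numbers below are made-up defaults.
def _effective_batch_size(per_device: int, num_processes: int, accumulation_steps: int) -> int:
    return per_device * num_processes * accumulation_steps


assert _effective_batch_size(16, 1, 4) == 64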
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 97 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
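# A minimal usage sketch (not part of the original script); argument values
# are illustrative. Calling the function directly:
#
#   download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")
#
# or through the fire CLI below (the script name is an assumption):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16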
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
| 97 | 1 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: speed of sound c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
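# A quick numeric sanity check (not part of the original file): water at
# density ~998 kg/m^3 with bulk modulus ~2.15e9 Pa gives roughly 1468 m/s.
assert abs(speed_of_sound_in_a_fluid(998, 2.15e9) - 1467.8) < 1.0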
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 370 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
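# A minimal usage sketch (not part of the original script):
#
#   >>> zeller("01-31-2010")
#   'Your date 01-31-2010, is a Sunday!'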
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
    zeller(args.date_input)
| 12 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_existing(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_existing(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 53 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt", )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)  # check for doc token related keys in dictionary.
| 53 | 1 |
def merge_sort(collection: list) -> list:
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
end.reverse()
return start + collection + end
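# A quick sanity check (not part of the original file): repeated min/max
# extraction rebuilds the list in ascending order.
assert merge_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]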
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=''',''')
| 355 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
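# A minimal usage sketch (not part of the original file): preprocessing one
# RGB image into a torch batch. The synthetic image below is illustrative.
if __name__ == "__main__":
    from PIL import Image

    processor = DeiTImageProcessor()
    image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    batch = processor.preprocess(image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224)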
| 36 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 297 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
except AttributeError:
pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
return data_frame
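# A minimal usage sketch (not part of the original script); live Amazon pages
# may be blocked or rate-limited, so treat this as illustrative only:
#
#   df = get_amazon_product_data("headphones")
#   print(df.head())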
if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 297 | 1 |
import math
def is_prime(number: int) -> bool:
    """
    Returns True if `number` is prime, checking odd divisors up to sqrt(number).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def lowercase_ ( A__ , A__=1 , **A__ ) -> Union[str, Any]:
"""simple docstring"""
snake_case = factor * value
snake_case = value
while not is_prime(A__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **A__ )
return value
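# Quick sanity check of the two functions above ("desc" searches downward):
print(next_prime(14))               # 17 -- first prime at or above 14
print(next_prime(14, desc=True))    # 13 -- first prime at or below 14
print(next_prime(7, factor=2))      # 17 -- the search starts from 2 * 7 = 14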
| 137 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase_ ( A__ ) -> list[list[float]]:
"""simple docstring"""
snake_case = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(A__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
snake_case = [[0.0, 0.0], [0.0, 0.0]]
snake_case , snake_case = matrix[1][1], matrix[0][0]
snake_case , snake_case = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(A__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(A__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
snake_case = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case = array(A__ )
for i in range(3 ):
for j in range(3 ):
snake_case = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case = array(A__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(A__ )
# Calculate the inverse of the matrix
return [[float(d(A__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
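# Sanity check for the 2x2 branch above: the closed form
#   inv([[a, b], [c, d]]) = 1/(ad - bc) * [[d, -b], [-c, a]]
# compared against NumPy's LU-based inverse on an arbitrary invertible matrix:
import numpy as np

a, b, c, d = 2.0, 5.0, 1.0, 3.0
det = a * d - b * c                                   # 2*3 - 5*1 = 1
manual = [[d / det, -b / det], [-c / det, a / det]]
print(manual)                                         # [[3.0, -5.0], [-1.0, 2.0]]
print(np.linalg.inv(np.array([[a, b], [c, d]])))      # should agree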
| 137 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__UpperCAmelCase : List[Any] = 8
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=BITS) -> List[str]:
__snake_case: Tuple = x.device
__snake_case: str = (x * 255).int().clamp(0 , 255)
__snake_case: List[str] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = rearrange(SCREAMING_SNAKE_CASE__ , """d -> d 1 1""")
__snake_case: Any = rearrange(SCREAMING_SNAKE_CASE__ , """b c h w -> b c 1 h w""")
__snake_case: Optional[Any] = ((x & mask) != 0).float()
__snake_case: Tuple = rearrange(SCREAMING_SNAKE_CASE__ , """b c d h w -> b (c d) h w""")
__snake_case: int = bits * 2 - 1
return bits
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=BITS) -> int:
__snake_case: List[str] = x.device
__snake_case: Union[str, Any] = (x > 0).int()
__snake_case: List[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE__ , dtype=torch.intaa)
__snake_case: List[str] = rearrange(SCREAMING_SNAKE_CASE__ , """d -> d 1 1""")
__snake_case: List[str] = rearrange(SCREAMING_SNAKE_CASE__ , """b (c d) h w -> b c d h w""" , d=8)
__snake_case: Optional[Any] = reduce(x * mask , """b c d h w -> b c h w""" , """sum""")
return (dec / 255).clamp(0.0 , 1.0)
def A__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__ = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""")
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__snake_case: Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__snake_case: Any = self.alphas_cumprod[timestep]
__snake_case: Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__snake_case: Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case: Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__snake_case: List[Any] = self.bit_scale
if self.config.clip_sample:
__snake_case: List[str] = torch.clamp(SCREAMING_SNAKE_CASE__ , -scale , SCREAMING_SNAKE_CASE__)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__snake_case: Optional[Any] = self._get_variance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Dict = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__snake_case: List[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case: Optional[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case: Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__snake_case: Optional[int] = model_output.device if torch.is_tensor(SCREAMING_SNAKE_CASE__) else """cpu"""
__snake_case: Any = torch.randn(model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = self._get_variance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) ** 0.5 * eta * noise
__snake_case: Optional[int] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ , pred_original_sample=SCREAMING_SNAKE_CASE__)
def A__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="epsilon" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__ = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__snake_case: Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__snake_case , __snake_case: Optional[int] = torch.split(SCREAMING_SNAKE_CASE__ , sample.shape[1] , dim=1)
else:
__snake_case: List[str] = None
# 1. compute alphas, betas
__snake_case: List[str] = self.alphas_cumprod[t]
__snake_case: Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
__snake_case: List[Any] = 1 - alpha_prod_t
__snake_case: Dict = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__snake_case: List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__snake_case: Optional[int] = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''')
# 3. Clip "predicted x_0"
__snake_case: int = self.bit_scale
if self.config.clip_sample:
__snake_case: int = torch.clamp(SCREAMING_SNAKE_CASE__ , -scale , SCREAMING_SNAKE_CASE__)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case: Dict = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__snake_case: Union[str, Any] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case: List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case: str = 0
if t > 0:
__snake_case: Union[str, Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=SCREAMING_SNAKE_CASE__).to(model_output.device)
__snake_case: Union[str, Any] = (self._get_variance(SCREAMING_SNAKE_CASE__ , predicted_variance=SCREAMING_SNAKE_CASE__) ** 0.5) * noise
__snake_case: Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ , pred_original_sample=SCREAMING_SNAKE_CASE__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[str] , A : UNetaDConditionModel , A : Union[DDIMScheduler, DDPMScheduler] , A : Optional[float] = 1.0 , ):
super().__init__()
__snake_case: Optional[int] = bit_scale
__snake_case: List[Any] = (
ddim_bit_scheduler_step if isinstance(A , A ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self : Any , A : Optional[int] = 256 , A : Optional[int] = 256 , A : Optional[int] = 50 , A : Optional[torch.Generator] = None , A : Optional[int] = 1 , A : Optional[str] = "pil" , A : bool = True , **A : Tuple , ):
__snake_case: Optional[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=A , )
__snake_case: int = decimal_to_bits(A ) * self.bit_scale
__snake_case: List[Any] = latents.to(self.device )
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__snake_case: List[str] = self.unet(A , A ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case: int = self.scheduler.step(A , A , A ).prev_sample
__snake_case: int = bits_to_decimal(A )
if output_type == "pil":
__snake_case: Optional[int] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
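# The two helpers at the top implement the "analog bits" trick from Bit
# Diffusion: each image byte is expanded into eight {-1, +1} bit channels
# before diffusion and packed back into bytes after sampling. A minimal,
# self-contained sketch of that round trip (same einops patterns; the shapes
# and values here are arbitrary illustration, not pipeline output):
import torch
from einops import rearrange, reduce

x = torch.rand(1, 3, 4, 4)                                   # fake image in [0, 1]
b = (x * 255).int().clamp(0, 255)                            # byte values
mask = 2 ** torch.arange(7, -1, -1)                          # 128, 64, ..., 1
bits = ((rearrange(b, "b c h w -> b c 1 h w") & rearrange(mask, "d -> d 1 1")) != 0).float()
bits = rearrange(bits, "b c d h w -> b (c d) h w") * 2 - 1   # scale {0,1} -> {-1,+1}

dec = (bits > 0).int()                                        # threshold back to bits
dec = rearrange(dec, "b (c d) h w -> b c d h w", d=8)
dec = reduce(dec * rearrange(mask, "d -> d 1 1"), "b c d h w -> b c h w", "sum")
assert torch.equal(dec, b.long())                             # lossless round trip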
| 111 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any , A : Dict , A : Any ):
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''  # join each shape dim, not the whole argument
def UpperCAmelCase__ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[int]=0 , A : Tuple=(4, 4, 64, 64) , A : Tuple=False ):
__snake_case: Dict = jnp.bfloataa if fpaa else jnp.floataa
__snake_case: str = jnp.array(load_hf_numpy(self.get_file_format(A , A ) ) , dtype=A )
return image
def UpperCAmelCase__ ( self : Union[str, Any] , A : Any=False , A : Optional[Any]="CompVis/stable-diffusion-v1-4" ):
__snake_case: List[Any] = jnp.bfloataa if fpaa else jnp.floataa
__snake_case: Union[str, Any] = """bf16""" if fpaa else None
__snake_case , __snake_case: Optional[int] = FlaxUNetaDConditionModel.from_pretrained(
A , subfolder="""unet""" , dtype=A , revision=A )
return model, params
def UpperCAmelCase__ ( self : Tuple , A : Tuple=0 , A : str=(4, 77, 768) , A : List[str]=False ):
__snake_case: Any = jnp.bfloataa if fpaa else jnp.floataa
__snake_case: Dict = jnp.array(load_hf_numpy(self.get_file_format(A , A ) ) , dtype=A )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Any] , A : str , A : Any ):
__snake_case , __snake_case: Union[str, Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=A )
__snake_case: Tuple = self.get_latents(A , fpaa=A )
__snake_case: int = self.get_encoder_hidden_states(A , fpaa=A )
__snake_case: List[Any] = model.apply(
{"""params""": params} , A , jnp.array(A , dtype=jnp.intaa ) , encoder_hidden_states=A , ).sample
assert sample.shape == latents.shape
__snake_case: str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case: Optional[int] = jnp.array(A , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(A , A , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def UpperCAmelCase__ ( self : Optional[Any] , A : int , A : Tuple , A : List[str] ):
__snake_case , __snake_case: Union[str, Any] = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=A )
__snake_case: Optional[int] = self.get_latents(A , shape=(4, 4, 96, 96) , fpaa=A )
__snake_case: str = self.get_encoder_hidden_states(A , shape=(4, 77, 1_024) , fpaa=A )
__snake_case: str = model.apply(
{"""params""": params} , A , jnp.array(A , dtype=jnp.intaa ) , encoder_hidden_states=A , ).sample
assert sample.shape == latents.shape
__snake_case: Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__snake_case: Any = jnp.array(A , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(A , A , atol=1E-2 )
| 111 | 1 |
from __future__ import annotations
from collections.abc import Callable
SCREAMING_SNAKE_CASE_ = list[list[float | int]]
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(__A )
SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__A )]
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
for row in range(__A ):
for col in range(__A ):
SCREAMING_SNAKE_CASE = matrix[row][col]
SCREAMING_SNAKE_CASE = vector[row][0]
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__A , __A ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __A ):
SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __A ):
for row in range(__A ):
SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__A , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__A )
]
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Callable[[int], int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(__A )
SCREAMING_SNAKE_CASE = [[0 for _ in range(__A )] for _ in range(__A )]
SCREAMING_SNAKE_CASE = [[0] for _ in range(__A )]
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__A ):
for col in range(__A ):
SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE = y_val
SCREAMING_SNAKE_CASE = solve(__A , __A )
def interpolated_func(_SCREAMING_SNAKE_CASE ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__A ) )
return interpolated_func
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowercase ( _SCREAMING_SNAKE_CASE = question_function , _SCREAMING_SNAKE_CASE = 10 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [func(__A ) for x_val in range(1 , order + 1 )]
SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
SCREAMING_SNAKE_CASE = 1
while func(__A ) == poly(__A ):
x_val += 1
ret += poly(__A )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
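# The idea behind Project Euler 101: fit an exact-degree polynomial through the
# first k terms, then evaluate one step further to get the "first incorrect
# term" (FIT). A small self-contained illustration on u(n) = n^3 with NumPy
# (np.polyfit stands in for the Gaussian elimination above):
import numpy as np

def fit_next(seq):
    xs = np.arange(1, len(seq) + 1)
    coeffs = np.polyfit(xs, seq, deg=len(seq) - 1)   # exact fit through seq
    return round(float(np.polyval(coeffs, len(seq) + 1)))

cubes = [n**3 for n in range(1, 5)]   # 1, 8, 27, 64
print(fit_next(cubes[:1]))            # 1   -> FIT of OP(1, n)
print(fit_next(cubes[:2]))            # 15  -> FIT of OP(2, n)
print(fit_next(cubes[:3]))            # 58  -> FIT of OP(3, n)
print(fit_next(cubes))                # 125 -> OP(4, n) reproduces n^3 exactly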
| 357 |
class Graph:
    """Directed graph stored as an adjacency dict {vertex: [neighbours]}."""
    def __init__(self) -> None:
        self.vertex = {}
    def print_graph(self) -> None:
        """Prints each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Adds the directed edge from_vertex -> to_vertex."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs(self) -> None:
        """Visits every vertex, restarting the traversal on unvisited ones."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)
    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """Marks start_vertex, prints it, and recurses into unvisited vertices."""
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
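# For deep graphs the recursive traversal above can hit Python's recursion
# limit; the classic stack-based DFS from a start vertex avoids that. A
# standalone sketch using the same adjacency-dict layout (on this example it
# produces the same order):
def dfs_iterative(vertex: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours reversed so they pop in insertion order
        stack.extend(reversed(vertex.get(node, [])))
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]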
| 193 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_UpperCamelCase = '''xvjiarui/stable-diffusion-2-inpainting'''
_UpperCamelCase , _UpperCamelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
_UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_UpperCamelCase = jax.random.PRNGKey(0 )
_UpperCamelCase = 50
_UpperCamelCase = jax.device_count()
_UpperCamelCase = num_samples * [prompt]
_UpperCamelCase = num_samples * [init_image]
_UpperCamelCase = num_samples * [mask_image]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pipeline.prepare_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# shard inputs and rng
_UpperCamelCase = replicate(lowerCAmelCase__ )
_UpperCamelCase = jax.random.split(lowerCAmelCase__ , jax.device_count() )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = shard(lowerCAmelCase__ )
_UpperCamelCase = pipeline(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__ )
_UpperCamelCase = output.images.reshape(lowerCAmelCase__ , 512 , 512 , 3 )
_UpperCamelCase = images[0, 253:256, 253:256, -1]
_UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 324 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : Tuple = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCamelCase : List[Any] = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , A , A = None , A = None , A = None , A = True , ) -> List[str]:
snake_case : int = [file for file in os.listdir(A ) if os.path.isfile(os.path.join(A , A ) )]
if identifier is not None:
snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(A , A ):
for n_ in n_identifier:
snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
snake_case : int = [file for file in files if n_identifier not in file]
snake_case : Any = ignore_files or []
ignore_files.append("""__init__.py""" )
snake_case : Dict = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , A )
if only_modules:
snake_case : Any = file.split(""".""" )[0]
try:
snake_case : List[Any] = getattr(A , A )
snake_case : List[Any] = doctest.DocTestSuite(A )
snake_case : Tuple = unittest.TextTestRunner().run(A )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[int] = Path("""src/transformers""" )
snake_case : str = """modeling"""
snake_case : Tuple = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(A , identifier=A , ignore_files=A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = Path("""src/transformers""" )
snake_case : List[str] = """tokenization"""
self.analyze_directory(A , identifier=A )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Tuple = Path("""src/transformers""" )
snake_case : str = """configuration"""
self.analyze_directory(A , identifier=A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = Path("""src/transformers""" )
snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(A , n_identifier=A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Union[str, Any] = Path("""docs/source""" )
snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(A , ignore_files=A , only_modules=A )
| 176 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> Optional[int]:
super().__init__(*A , **A )
requires_backends(self , """decord""" )
self.check_model_type(A )
def UpperCAmelCase ( self , A=None , A=None , A=None ) -> int:
snake_case : Any = {}
if frame_sampling_rate is not None:
snake_case : int = frame_sampling_rate
if num_frames is not None:
snake_case : Union[str, Any] = num_frames
snake_case : str = {}
if top_k is not None:
snake_case : Optional[int] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , A , **A ) -> Dict:
return super().__call__(A , **A )
def UpperCAmelCase ( self , A , A=None , A=1 ) -> Tuple:
if num_frames is None:
snake_case : Tuple = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case : Optional[int] = BytesIO(requests.get(A ).content )
snake_case : Optional[Any] = VideoReader(A )
videoreader.seek(0 )
snake_case : Optional[Any] = 0
snake_case : Optional[Any] = num_frames * frame_sampling_rate - 1
snake_case : Any = np.linspace(A , A , num=A , dtype=np.intaa )
snake_case : int = videoreader.get_batch(A ).asnumpy()
snake_case : List[Any] = list(A )
snake_case : Optional[Any] = self.image_processor(A , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Dict = self.model(**A )
return model_outputs
def UpperCAmelCase ( self , A , A=5 ) -> int:
if top_k > self.model.config.num_labels:
snake_case : str = self.model.config.num_labels
if self.framework == "pt":
snake_case : List[Any] = model_outputs.logits.softmax(-1 )[0]
snake_case , snake_case : Tuple = probs.topk(A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
snake_case : List[Any] = scores.tolist()
snake_case : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A , A )]
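# The preprocess step above samples `num_frames` evenly spaced frame indices,
# stretched out by `frame_sampling_rate`. A standalone look at that index
# arithmetic with illustrative values:
import numpy as np

num_frames, frame_sampling_rate = 8, 4
start_idx, end_idx = 0, num_frames * frame_sampling_rate - 1   # 0 .. 31
indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
print(indices)   # [ 0  4  8 13 17 22 26 31] -- every ~4th frame of a 32-frame window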
| 176 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
a : str = jnp.ones((batch_size, length)) / length
return scores
def __snake_case ( self : Optional[Any]):
a : Optional[int] = None
a : Optional[Any] = 20
a : Optional[int] = self._get_uniform_logits(batch_size=2 , length=__UpperCAmelCase)
# tweak scores to not be uniform anymore
a : int = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
a : Union[str, Any] = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
a : Dict = jax.nn.softmax(__UpperCAmelCase , axis=-1)
a : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
a : Dict = FlaxTemperatureLogitsWarper(temperature=1.3)
a : Any = jax.nn.softmax(temp_dist_warper_sharper(__UpperCAmelCase , scores.copy() , cur_len=__UpperCAmelCase) , axis=-1)
a : Any = jax.nn.softmax(temp_dist_warper_smoother(__UpperCAmelCase , scores.copy() , cur_len=__UpperCAmelCase) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def __snake_case ( self : List[str]):
a : Optional[int] = None
a : Dict = 10
a : str = 2
# create ramp distribution
a : Union[str, Any] = np.broadcast_to(np.arange(__UpperCAmelCase)[None, :] , (batch_size, vocab_size)).copy()
a : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
a : str = FlaxTopKLogitsWarper(3)
a : Union[str, Any] = top_k_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
a : List[Any] = 5
a : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
a : Dict = np.broadcast_to(np.arange(__UpperCAmelCase)[None, :] , (batch_size, length)).copy()
a : Any = top_k_warp_safety_check(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def __snake_case ( self : Tuple):
a : List[Any] = None
a : int = 10
a : Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
a : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
a : List[Any] = FlaxTopPLogitsWarper(0.8)
a : List[str] = np.exp(top_p_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
a : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
# check edge cases with negative and extreme logits
a : int = np.broadcast_to(np.arange(__UpperCAmelCase)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
a : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
a : int = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
a : str = top_p_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def __snake_case ( self : List[str]):
a : Union[str, Any] = 20
a : Any = 4
a : Any = 0
a : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__UpperCAmelCase)
# check that min length is applied at length 5
a : Dict = ids_tensor((batch_size, 20) , vocab_size=20)
a : Any = 5
a : List[str] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = min_dist_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
a : Any = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = 15
a : Dict = min_dist_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertFalse(jnp.isinf(__UpperCAmelCase).any())
def __snake_case ( self : str):
a : Dict = 20
a : Any = 4
a : str = 0
a : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__UpperCAmelCase)
# check that all scores are -inf except the bos_token_id score
a : Dict = ids_tensor((batch_size, 1) , vocab_size=20)
a : List[Any] = 1
a : List[Any] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : int = logits_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
a : Optional[Any] = 3
a : List[Any] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = logits_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertFalse(jnp.isinf(__UpperCAmelCase).any())
def __snake_case ( self : Dict):
a : List[Any] = 20
a : Tuple = 4
a : List[Any] = 0
a : str = 5
a : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
# check that all scores are -inf except the eos_token_id when max_length is reached
a : Union[str, Any] = ids_tensor((batch_size, 4) , vocab_size=20)
a : Optional[int] = 4
a : Union[str, Any] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : str = logits_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
a : Any = 3
a : List[Any] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = logits_processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
self.assertFalse(jnp.isinf(__UpperCAmelCase).any())
def __snake_case ( self : Optional[Any]):
a : Optional[Any] = 4
a : int = 10
a : int = 15
a : int = 2
a : List[str] = 1
a : Dict = 15
# dummy input_ids and scores
a : Any = ids_tensor((batch_size, sequence_length) , __UpperCAmelCase)
a : List[Any] = input_ids.copy()
a : Optional[int] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = scores.copy()
# instantiate all dist processors
a : int = FlaxTemperatureLogitsWarper(temperature=0.5)
a : int = FlaxTopKLogitsWarper(3)
a : int = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
a : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__UpperCAmelCase)
a : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__UpperCAmelCase)
a : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
a : Tuple = 10
# no processor list
a : str = temp_dist_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : int = top_k_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : Tuple = top_p_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : Optional[Any] = min_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : Dict = bos_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : List[str] = eos_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
# with processor list
a : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
a : List[Any] = processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
# scores should be equal
self.assertTrue(jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def __snake_case ( self : Optional[Any]):
a : Dict = 4
a : Any = 10
a : Dict = 15
a : Union[str, Any] = 2
a : Tuple = 1
a : int = 15
# dummy input_ids and scores
a : List[str] = ids_tensor((batch_size, sequence_length) , __UpperCAmelCase)
a : List[str] = input_ids.copy()
a : Union[str, Any] = self._get_uniform_logits(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = scores.copy()
# instantiate all dist processors
a : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5)
a : Dict = FlaxTopKLogitsWarper(3)
a : List[str] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
a : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__UpperCAmelCase)
a : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__UpperCAmelCase)
a : str = FlaxForcedEOSTokenLogitsProcessor(max_length=__UpperCAmelCase , eos_token_id=__UpperCAmelCase)
a : Optional[int] = 10
# no processor list
def run_no_processor_list(__UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple):
a : List[str] = temp_dist_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : List[str] = top_k_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : Tuple = top_p_warp(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : int = min_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : int = bos_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
a : List[Any] = eos_dist_proc(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
return scores
# with processor list
def run_processor_list(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]):
a : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
a : Dict = processor(__UpperCAmelCase , __UpperCAmelCase , cur_len=__UpperCAmelCase)
return scores
a : List[Any] = jax.jit(__UpperCAmelCase)
a : Union[str, Any] = jax.jit(__UpperCAmelCase)
a : List[Any] = jitted_run_no_processor_list(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : List[str] = jitted_run_processor_list(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# scores should be equal
self.assertTrue(jnp.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
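# What the warpers under test actually do is easiest to see on one toy
# distribution. A minimal NumPy sketch of temperature scaling followed by
# top-k filtering (the logits below are arbitrary illustration values, not
# taken from the tests above):
import numpy as np

def _softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.5, -1.0])
print(_softmax(logits / 0.5).round(3))    # temperature < 1 sharpens the peak
print(_softmax(logits / 1.3).round(3))    # temperature > 1 flattens it

k = 2                                      # top-k keeps only the k best logits
masked = np.where(logits >= np.sort(logits)[-k], logits, -np.inf)
print(_softmax(masked).round(3))           # only two non-zero probabilities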
| 40 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Element-wise logistic sigmoid, 1 / (1 + e^-x).'''
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''Sigmoid-weighted linear unit (SiLU, also called swish): x * sigmoid(x).'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
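# Quick numeric check of the two functions above:
values = np.array([-2.0, 0.0, 2.0])
print(sigmoid(values))              # ~[0.119, 0.5, 0.881]
print(sigmoid_linear_unit(values))  # ~[-0.238, 0.0, 1.762]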
| 318 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = False, False, False
@dataclass
class _snake_case :
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : bool = True
lowerCAmelCase_ : bool = True
lowerCAmelCase_ : Optional[str] = None
# Automatically constructed
lowerCAmelCase_ : ClassVar[str] = "dict"
lowerCAmelCase_ : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
lowerCAmelCase_ : str = field(default="Audio" , init=lowercase_ , repr=lowercase_ )
def __call__( self ) -> Tuple:
'''simple docstring'''
return self.pa_type
def lowerCAmelCase__ ( self , a__ ) -> dict:
'''simple docstring'''
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err
if isinstance(a__ , a__ ):
return {"bytes": None, "path": value}
elif isinstance(a__ , a__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case_ = BytesIO()
sf.write(a__ , value["array"] , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
snake_case_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
snake_case_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32_767
snake_case_ = BytesIO(bytes() )
sf.write(a__ , a__ , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> dict:
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
snake_case_ , snake_case_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err
snake_case_ = xsplitext(a__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
snake_case_ = token_per_repo_id or {}
snake_case_ = path.split("::" )[-1]
try:
snake_case_ = string_to_dict(a__ , config.HUB_DATASETS_URL )["repo_id"]
snake_case_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
snake_case_ = None
with xopen(a__ , "rb" , use_auth_token=a__ ) as f:
snake_case_ , snake_case_ = sf.read(a__ )
else:
snake_case_ , snake_case_ = sf.read(a__ )
snake_case_ = array.T
if self.mono:
snake_case_ = librosa.to_mono(a__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
snake_case_ = librosa.resample(a__ , orig_sr=a__ , target_sr=self.sampling_rate )
snake_case_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowerCAmelCase__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
def lowerCAmelCase__ ( self , a__ ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
snake_case_ = pa.array([None] * len(a__ ) , type=pa.binary() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case_ = pa.array([None] * len(a__ ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
snake_case_ = pa.array([Audio().encode_example(a__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
snake_case_ = storage.field("bytes" )
else:
snake_case_ = pa.array([None] * len(a__ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
snake_case_ = storage.field("path" )
else:
snake_case_ = pa.array([None] * len(a__ ) , type=pa.string() )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
return array_cast(a__ , self.pa_type )
def lowerCAmelCase__ ( self , a__ ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(a__ ):
with xopen(a__ , "rb" ) as f:
snake_case_ = f.read()
return bytes_
snake_case_ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case_ = pa.array(
[os.path.basename(a__ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
snake_case_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a__ , self.pa_type )
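# The PCM branch above turns raw int16 samples into float32 audio in [-1, 1]
# by dividing by 32767. A standalone sketch of that normalization:
import numpy as np

pcm = np.array([-32768, -16384, 0, 16384, 32767], dtype=np.int16)
audio = pcm.astype(np.float32) / 32767
print(audio)   # ~[-1.00003, -0.5, 0.0, 0.5, 1.0]
# The slight overshoot at -32768 is why some decoders divide by 32768 instead;
# this sketch follows the 32767 convention used in the feature code above.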
| 92 |
'''simple docstring'''
from random import shuffle
import tensorflow.compat.v1 as tf  # this snippet uses the TF1 graph/Session API

tf.disable_eager_execution()  # required on TF2 before building placeholders below
from numpy import array
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
'''simple docstring'''
snake_case_ = int(snake_case )
assert noofclusters < len(snake_case )
# Find out the dimensionality
snake_case_ = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case_ = list(range(len(snake_case ) ) )
shuffle(snake_case )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case_ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case_ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case_ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(snake_case )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case_ = tf.placeholder("float64" , [dim] )
snake_case_ = []
for centroid in centroids:
cent_assigns.append(tf.assign(snake_case , snake_case ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case_ = [tf.Variable(0 ) for i in range(len(snake_case ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case_ = tf.placeholder("int32" )
snake_case_ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(snake_case , snake_case ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
snake_case_ = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case_ = tf.reduce_mean(snake_case , 0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case_ = tf.placeholder("float" , [dim] )
snake_case_ = tf.placeholder("float" , [dim] )
        snake_case_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(snake_case , snake_case ) , 2 ) ) )  # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case_ = tf.placeholder("float" , [noofclusters] )
snake_case_ = tf.argmin(snake_case , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        snake_case_ = tf.global_variables_initializer()  # modern replacement for the removed tf.initialize_all_variables()
# Initialize all variables
sess.run(snake_case )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case_ = 1_0_0
for _ in range(snake_case ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(snake_case ) ):
snake_case_ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case_ = [
sess.run(snake_case , feed_dict={va: vect, va: sess.run(snake_case )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case_ = sess.run(
snake_case , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(snake_case ):
# Collect all the vectors assigned to this cluster
snake_case_ = [
vectors[i]
for i in range(len(snake_case ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case_ = sess.run(
snake_case , feed_dict={mean_input: array(snake_case )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case_ = sess.run(snake_case )
snake_case_ = sess.run(snake_case )
return centroids, assignments
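# The same E/M loop in plain NumPy, useful as a reference when porting off the
# TF1 session API -- a minimal sketch (fixed iteration count, no empty-cluster
# handling; the data below is synthetic):
import numpy as np

def kmeans_np(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # E-step: assign each vector to its nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # M-step: move each centroid to the mean of its cluster
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments

pts = np.vstack([np.random.randn(50, 2) + 5, np.random.randn(50, 2) - 5])
cents, assign = kmeans_np(pts, k=2)
print(cents.round(1))   # two centroids near (5, 5) and (-5, -5)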
| 92 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class __snake_case ( __lowerCAmelCase ):
a__ = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase=1_25 , lowercase=None , **lowercase , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a__: Optional[Any] = [f'<extra_id_{i}>' for i in range(lowercase)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a__: Optional[int] = len(set(filter(lambda lowercase: bool('extra_id' in str(lowercase)) , lowercase)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens')
a__: Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else pad_token
a__: Optional[int] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else eos_token
a__: Dict = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else unk_token
super().__init__(
eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , extra_ids=lowercase , additional_special_tokens=lowercase , **lowercase , )
a__: Union[str, Any] = extra_ids
a__: str = 2**8 # utf is 8 bits
# define special tokens dict
a__: Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a__: Dict = len(self.special_tokens_encoder)
a__: List[Any] = len(lowercase)
for i, token in enumerate(lowercase):
a__: Union[str, Any] = self.vocab_size + i - n
a__: Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = False) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase)) + [1]
return ([0] * len(lowercase)) + [1] + ([0] * len(lowercase)) + [1]
def lowerCamelCase_ ( self , lowercase) -> List[int]:
'''simple docstring'''
if len(lowercase) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.')
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__: Dict = self._add_eos_if_not_present(lowercase)
if token_ids_a is None:
return token_ids_a
else:
a__: Optional[Any] = self._add_eos_if_not_present(lowercase)
return token_ids_a + token_ids_a
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
        a__: int = [chr(i) for i in text.encode('utf-8')]  # one token per UTF-8 byte
return tokens
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
if token in self.special_tokens_encoder:
a__: List[str] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a__: Union[str, Any] = self.added_tokens_encoder[token]
elif len(lowercase) != 1:
a__: List[Any] = self.unk_token_id
else:
a__: int = ord(lowercase) + self._num_special_tokens
return token_id
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
if index in self.special_tokens_decoder:
a__: Tuple = self.special_tokens_decoder[index]
else:
a__: Tuple = chr(index - self._num_special_tokens)
return token
def lowerCamelCase_ ( self , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = B''
for token in tokens:
if token in self.special_tokens_decoder:
a__: Optional[Any] = self.special_tokens_decoder[token].encode('utf-8')
elif token in self.added_tokens_decoder:
a__: List[Any] = self.special_tokens_decoder[token].encode('utf-8')
elif token in self.special_tokens_encoder:
a__: Any = token.encode('utf-8')
elif token in self.added_tokens_encoder:
a__: Union[str, Any] = token.encode('utf-8')
else:
a__: Union[str, Any] = bytes([ord(lowercase)])
bstring += tok_string
a__: str = bstring.decode('utf-8' , errors='ignore')
return string
def lowerCamelCase_ ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
return ()
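
# --- Hedged usage sketch (not part of the original file; the helper names are
# invented for illustration). It mirrors the byte-level scheme above: each
# UTF-8 byte is one token, with ids shifted past the 3 default special tokens.
def _demo_byte_tokenize(text, num_special_tokens=3):
    # one token id per UTF-8 byte, offset by the special-token block
    return [b + num_special_tokens for b in text.encode("utf-8")]


def _demo_byte_detokenize(ids, num_special_tokens=3):
    # drop special ids, shift back, and decode the raw byte string
    payload = bytes(i - num_special_tokens for i in ids if i >= num_special_tokens)
    return payload.decode("utf-8", errors="ignore")


assert _demo_byte_detokenize(_demo_byte_tokenize("héllo")) == "héllo"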
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
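
# --- Hedged sketch (illustration only; _DemoConfig is an invented name and a
# simplification of the real PretrainedConfig machinery). It shows what the
# attribute_map above buys you: GPT-2-style names (n_head, n_layer) stay the
# storage names, while standard HF names resolve through the map.
class _DemoConfig:
    attribute_map = {"num_attention_heads": "n_head", "num_hidden_layers": "n_layer"}

    def __init__(self, n_head=1, n_layer=3):
        self.n_head = n_head
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only invoked for attributes not found the normal way
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _DemoConfig(n_head=4).num_attention_heads == 4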
| 290 | 1 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
    layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
test_set = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
    prediction = "Normal"
if result[0][0] == 1:
    prediction = "Abnormality detected"
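
# Note (illustration only): with a sigmoid output, classifier.predict returns
# probabilities in [0, 1], so a threshold is usually safer than comparing for
# exact equality with 0 or 1, e.g.:
#   prediction = "Abnormality detected" if result[0][0] >= 0.5 else "Normal"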
| 366 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
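
# --- Hedged sketch (illustration only; standalone, not part of the original
# tests). The tests above exercise the library-wide verbosity helpers; this is
# a stdlib analogue of the level semantics they rely on: a logger only emits
# records at or above its effective level.
import logging as _py_logging

_demo_logger = _py_logging.getLogger("verbosity_demo")
_demo_logger.setLevel(_py_logging.ERROR)
assert not _demo_logger.isEnabledFor(_py_logging.WARNING)
_demo_logger.setLevel(_py_logging.WARNING)
assert _demo_logger.isEnabledFor(_py_logging.WARNING)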
| 252 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
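
# --- Hedged note (illustration only). _LazyModule defers the heavy torch/TF
# imports above until an attribute is first accessed. A minimal analogue of
# that idea using a module-level __getattr__ (PEP 562):
#
#   def __getattr__(name):
#       if name == "TransfoXLModel":
#           from .modeling_transfo_xl import TransfoXLModel
#           return TransfoXLModel
#       raise AttributeError(name)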
| 5 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and lazy deletion markers."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
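
if __name__ == "__main__":
    # Small usage sketch (illustration only): open addressing with lazy
    # deletion markers, as implemented above. Deleted slots stay tombstoned
    # so later probes keep walking past them.
    hm = HashMap()
    hm["a"] = 1
    hm["b"] = 2
    del hm["a"]
    assert "a" not in hm and hm["b"] == 2
    print(hm)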
| 116 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33

            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
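
# --- Hedged sketch (illustration only; _demo_position_ids is an invented
# helper). The embedding tests above rely on create_position_ids_from_input_ids,
# which gives padding tokens the padding index and numbers real tokens from
# padding_idx + 1 onward. A NumPy analogue of that logic:
import numpy as np


def _demo_position_ids(input_ids, padding_idx):
    # 1 where the token is real, 0 where it is padding
    mask = (np.asarray(input_ids) != padding_idx).astype(np.int64)
    # cumulative count of real tokens, zeroed at padding, shifted past padding_idx
    return np.cumsum(mask, axis=1) * mask + padding_idx


assert _demo_position_ids([[12, 31, 13, 1]], padding_idx=1).tolist() == [[2, 3, 4, 1]]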
| 360 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
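
# --- Hedged sketch (illustration only; _demo_det is an invented helper).
# Matrix.determinant above is a Laplace expansion along the first row; the
# same recursion in compact standalone form over nested lists:
def _demo_det(m):
    if len(m) == 1:
        return m[0][0]
    return sum(
        (-1) ** j * m[0][j] * _demo_det([row[:j] + row[j + 1 :] for row in m[1:]])
        for j in range(len(m))
    )


assert _demo_det([[1, 2], [3, 4]]) == -2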
| 189 | 0 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other) -> bool:
        return self[-1] < other[-1]

    def __eq__(self, other) -> bool:
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
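
# Note (illustration only): each stack holds a non-increasing run, so the
# number of stacks equals the length of the longest increasing subsequence of
# the input; e.g. patience_sort([3, 1, 2]) builds stacks [3, 1] and [2]
# before merging them back into sorted order.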
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted)) | 262 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
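
# Note (illustration only): with two classes an odd k avoids voting ties; for
# iris (three classes) ties are still possible, and Counter.most_common then
# breaks them by the order in which labels were first counted.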
| 253 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
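
# --- Hedged usage sketch (illustration only). PretrainedConfig subclasses
# round-trip through JSON; assuming the class above is exported as
# RetriBertConfig:
#
#   config = RetriBertConfig(projection_dim=128)
#   config.save_pretrained("./retribert-config")   # writes config.json
#   reloaded = RetriBertConfig.from_pretrained("./retribert-config")
#   assert reloaded.projection_dim == 128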
| 368 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowerCAmelCase__ = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
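
# Example (illustration only): Megatron checkpoint file "layer_03..." holds
# transformer block 0 because of the 3-layer offset above, so:
#   layer_name_mapping("input_layernorm.weight", "layer_03-model_00-model_states.pt")
# returns "h.0.input_layernorm.weight".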
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
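
# Example invocation (illustration only; the script name and paths are
# placeholders, and the flags mirror the argparse options defined above):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model \
#       --pretraining_tp 4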
| 175 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 89 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
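
# Example (illustration only): check one pinned dependency at runtime; this
# raises if the installed version violates the pin recorded in `deps`.
#   dep_version_check("tokenizers")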
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Tuple = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 358 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
A : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
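
    # Note (illustration only): slicing computes attention for `slice_size`
    # heads at a time, trading speed for peak memory, e.g.:
    #   pipe.enable_attention_slicing("auto")  # half the head count per slice
    #   pipe.enable_attention_slicing(1)       # most memory-frugal setting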
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = len(_UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_UpperCAmelCase )}.''' )
# get prompt text embeddings
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [""""""]
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !='''
f''' {type(_UpperCAmelCase )}.''' )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tw = 0 if dx < 0 else dx
        th = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        latents[:, :, th : th + h, tw : tw + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="""pt""" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
            has_nsfw_concept = None
if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
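
# A minimal, self-contained sketch of the classifier-free guidance update used
# above (the guidance weight `w` of eq. (2) in the Imagen paper). Tensor shapes
# and the guidance_scale value below are illustrative assumptions, not values
# taken from the pipeline.
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # concatenated [unconditional, text] predictions
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)  # one guided prediction per latent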
| 146 | 0 |
from typing import Any
class Node:
    def __init__( self , data: Any ):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    def __init__( self ):
        """simple docstring"""
        self.head = None

    def print_list( self ):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()

    def push( self , new_data: Any ):
        """simple docstring"""
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_1 , node_data_2 ):
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data , node_2.data = node_2.data , node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("""After swapping""")
    ll.print_list()
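
# Design note: swap_nodes exchanges the `data` payloads instead of relinking
# nodes, so after the two O(n) searches the swap itself is O(1) and no
# head/neighbour pointer bookkeeping is needed. Swapping with a value that is
# absent is a silent no-op, e.g. ll.swap_nodes(1, 99) would leave the list
# untouched because the second search returns None.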
| 92 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 3_2
@property
    def time_input_dim( self ):
"""simple docstring"""
return 3_2
@property
    def block_out_channels_a( self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
"""simple docstring"""
return 1_0_0
@property
    def dummy_tokenizer( self ):
"""simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
    def dummy_text_encoder( self ):
"""simple docstring"""
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
"""simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 1_0,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
return inputs
    def test_kandinsky_img2img( self ):
"""simple docstring"""
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
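
    # Two-stage flow exercised above: the prior pipeline maps the text prompt
    # to CLIP image embeddings (plus a negative/zero embedding), and the
    # img2img pipeline then denoises the init image conditioned on those
    # embeddings; the test only checks output shape and mean pixel distance.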
| 92 | 1 |
"""simple docstring"""
def decimal_isolate( number: float , digit_amount: int ) -> float:
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
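
# Worked example for the calls above (values are approximate, since binary
# floating point makes the fractional part inexact):
#   decimal_isolate(35.345, 1) == round(35.345 - 35, 1) ~= 0.3
#   decimal_isolate(1.53, 0) returns the raw fraction 1.53 - 1 ~= 0.53 unrounded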
| 351 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest( unittest.TestCase ):
    @slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 339 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 262 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """vit_mae"""
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=1_6 , decoder_hidden_size=5_1_2 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_0_4_8 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 262 | 1 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType( Enum ):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )


def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split("," )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(":" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
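
# Example step_rules string for the parser above: "1:10,0.1:20,0.01:30,0.005"
# keeps the multiplier at 1 before step 10, 0.1 before step 20, 0.01 before
# step 30, and 0.005 from then on (the trailing entry has no step boundary).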
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )


def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    '''simple docstring'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )


def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
__A = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
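
# Illustrative use of get_scheduler (defined below); names outside this module
# (model, etc.) are hypothetical:
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       SchedulerType.LINEAR, optimizer, num_warmup_steps=10, num_training_steps=100
#   )
#   ...
#   optimizer.step()
#   lr_scheduler.step()  # lr ramps up for 10 steps, then decays linearly to 0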
def get_scheduler(name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
| 64 |
"""simple docstring"""
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs(query: str , api_key: str = giphy_api_key ) -> list:
    '''simple docstring'''
    formatted_query = "+".join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    print('''\n'''.join(get_gifs('''space ship''')))
| 64 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self , ints: Iterable[int] ):
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )

    def __iter__(self ) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self ) -> int:
        """simple docstring"""
        return sum(1 for _ in self )

    def __str__(self ) -> str:
        """simple docstring"""
        return " -> ".join([str(node ) for node in self] )


def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
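
# Design note: merge_lists above re-sorts the concatenated values, which is
# O((m + n) log(m + n)); because both inputs are already sorted, a two-pointer
# merge would be O(m + n). Illustrative sketch (not used by the code above):
def merge_sorted_values(xs: Iterable[int], ys: Iterable[int]) -> list[int]:
    xs, ys = list(xs), list(ys)
    result: list[int] = []
    i = j = 0
    while i < len(xs) and j < len(ys):
        if xs[i] <= ys[j]:
            result.append(xs[i])
            i += 1
        else:
            result.append(ys[j])
            j += 1
    result.extend(xs[i:])  # at most one of the two tails is non-empty
    result.extend(ys[j:])
    return result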
| 24 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs ):
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
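
# Illustrative consumer of the pickled lengths (assumed downstream use, not
# part of this script): sort example indices by length so that batches built
# from neighbouring indices contain similarly sized sequences.
#
#   import pickle
#   with open(train_ds.len_file, "rb") as f:
#       lens = pickle.load(f)
#   sorted_indices = sorted(range(len(lens)), key=lambda i: lens[i])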
| 340 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )
    def _forward_hook( self , m: nn.Module , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Convad ) or isinstance(m , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )
    def __call__( self , x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                F''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    '''simple docstring'''
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    '''simple docstring'''
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    num_labels = num_labels
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    idalabel = idalabel
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
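
# Minimal illustration of the Tracker/ModuleTransfer mechanism above
# (hypothetical toy modules, not part of the conversion script): forward hooks
# record leaf modules on two equivalent networks, then weights are copied
# pairwise.
#
#   src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
#   dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
#   x = torch.randn(1, 3, 16, 16)
#   ModuleTransfer(src=src, dest=dest)(x)
#   assert torch.allclose(src(x), dest(x))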
| 358 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str ):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img() -> torch.Tensor:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , '''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 6_5535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='''pt''' )
    outputs = model(**inputs )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
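
# The conversion is driven by a flat list of (old, new) key pairs plus
# in-place surgery on the fused qkv matrices. Toy illustration of the rename
# step on a hypothetical state dict:
#
#   state = {"backbone.patch_embed.proj.weight": w}
#   state["model.pixel_level_module.encoder.model.embeddings"
#         ".patch_embeddings.projection.weight"] = state.pop(
#       "backbone.patch_embed.proj.weight")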
| 98 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import * | 212 |
import re
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
if len(re.findall('[ATCG]' , SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 212 | 1 |
import os
def solution():
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/grid.txt' ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
        maximum = 0
        # right
        for i in range(20 ):
            for j in range(17 ):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17 ):
            for j in range(20 ):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17 ):
            for j in range(17 ):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17 ):
            for j in range(3 , 20 ):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
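
# Index pattern being scanned above: "diagonal 2" is the anti-diagonal, walking
# (i, j), (i+1, j-1), (i+2, j-2), (i+3, j-3), which is why j starts at 3; the
# other three scans bound i and/or j by 17 = 20 - 3 to keep products in range.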
| 192 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_autoformer"""] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
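    # What the lazy pattern above buys (standard transformers idiom): the
    # module object in sys.modules is swapped for a _LazyModule, so importing
    # this package stays cheap and the heavy torch-backed classes are only
    # imported on first attribute access, e.g. when AutoformerModel is looked
    # up.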
| 192 | 1 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
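
# Context for the sieve above: the sum of phi(n) for 2 <= n <= limit counts
# the reduced proper fractions with denominator at most `limit` (Project
# Euler 72). Each phi[n] starts as n and is multiplied by (1 - 1/p) for every
# prime p dividing n, so no factorization is performed. Note int() truncates,
# so tiny float error can matter for very small limits.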
| 229 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 100_0000 , n_limit: int = 10 ) -> int:
    '''simple docstring'''
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
| 54 | 0 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'''emoji''': True,
},
}
]
UpperCamelCase = 0
for log in Path().glob('''*.log'''):
UpperCamelCase = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase = f'{line["duration"]:.4f}'
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase = []
log.unlink()
UpperCamelCase = ''''''
UpperCamelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase = []
UpperCamelCase = {}
for test in failed_tests:
UpperCamelCase = test[0].split('''::''')
UpperCamelCase = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase = [test[0] for test in failed_table]
UpperCamelCase = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
UpperCamelCase = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase = len(err) + 10
UpperCamelCase = message[: 3000 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
UpperCamelCase = '''No failed tests! 🤗'''
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
UpperCamelCase = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
UpperCamelCase = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase = row[0]
else:
UpperCamelCase = ''''''
UpperCamelCase = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
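# A self-contained sketch of how the pipe-only TableFormat defined at the top
# of this script renders the failure table that gets embedded in the Slack
# code block. It only needs the `tabulate` package; the sample rows are invented.
from tabulate import DataRow, TableFormat, tabulate

demo_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
demo_rows = [["tests/test_cli.py", 2], ["tests/test_big_modeling.py", 1]]
print(tabulate(demo_rows, headers=["Test Location", "Num Failed"], tablefmt=demo_format, stralign="right"))
# -> pipe-separated rows such as "|  tests/test_cli.py |  2 |"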
| 334 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of a UNet1DModel forward pass."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
'''simple docstring'''
@register_to_config
def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple:
'''simple docstring'''
super().__init__()
A: Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
A: Tuple = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ )
A: List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
A: str = Timesteps(
block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ )
A: Any = block_out_channels[0]
if use_timestep_embedding:
A: Optional[Any] = block_out_channels[0] * 4
A: List[Any] = TimestepEmbedding(
in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , )
A: Optional[Any] = nn.ModuleList([] )
A: str = None
A: str = nn.ModuleList([] )
A: Tuple = None
# down
A: Any = in_channels
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
A: Optional[int] = output_channel
A: List[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
A: Optional[int] = get_down_block(
SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(SCREAMING_SNAKE_CASE_ )
# mid
A: Union[str, Any] = get_mid_block(
SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , )
# up
A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) )
A: List[str] = reversed_block_out_channels[0]
if out_block_type is None:
A: int = out_channels
else:
A: Union[str, Any] = block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ):
A: List[Any] = output_channel
A: int = (
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels
)
A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1
A: Optional[Any] = get_up_block(
SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(SCREAMING_SNAKE_CASE_ )
A: Any = output_channel
# out
A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
A: Optional[int] = get_out_block(
out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
A: Any = timestep
if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ):
A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0:
A: List[str] = timesteps[None].to(sample.device )
A: int = self.time_proj(SCREAMING_SNAKE_CASE_ )
if self.config.use_timestep_embedding:
A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ )
else:
A: str = timestep_embed[..., None]
A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
A: List[str] = ()
for downsample_block in self.down_blocks:
A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A: List[Any] = down_block_res_samples[-1:]
A: List[str] = down_block_res_samples[:-1]
A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ )
# 5. post-process
if self.out_block:
A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
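# The time-conditioning path above is built from two embedding modules that
# also ship publicly in diffusers; a minimal shape check, assuming the
# `diffusers` package is installed:
import torch
from diffusers.models.embeddings import GaussianFourierProjection, Timesteps

t = torch.tensor([1, 10, 100])
positional = Timesteps(num_channels=32, flip_sin_to_cos=True, downscale_freq_shift=0)
print(positional(t).shape)  # torch.Size([3, 32]): one sinusoidal embedding per timestep
fourier = GaussianFourierProjection(embedding_size=8)
print(fourier(t.float()).shape)  # torch.Size([3, 16]): sin and cos of 8 random frequencies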
| 334 | 1 |
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """
    Wraps a TVLT image processor and a TVLT feature extractor into a single
    processor that accepts video frames and/or raw audio.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
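# A usage sketch for the processor above, assuming the public TVLT checkpoint
# "ZinengTang/tvlt-base"; the video frames and waveform below are random
# stand-ins for real data.
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames, channels-first
audio = list(np.random.rand(10_000))  # mono waveform samples
inputs = processor(images=video, audio=audio, sampling_rate=44_100)
print(sorted(inputs.keys()))  # pixel/audio values plus the corresponding masks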
| 10 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _snake_case ( UpperCamelCase : int = 1000000 , UpperCamelCase : int = 10 ):
UpperCAmelCase : defaultdict = defaultdict(UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase : str = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[Any] = """codegen"""
__lowerCAmelCase : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :str ,__lowercase :List[Any]=5_0_4_0_0 ,__lowercase :List[str]=2_0_4_8 ,__lowercase :Tuple=2_0_4_8 ,__lowercase :List[Any]=4_0_9_6 ,__lowercase :Union[str, Any]=2_8 ,__lowercase :Optional[Any]=1_6 ,__lowercase :List[str]=6_4 ,__lowercase :List[Any]=None ,__lowercase :Union[str, Any]="gelu_new" ,__lowercase :Dict=0.0 ,__lowercase :str=0.0 ,__lowercase :List[Any]=0.0 ,__lowercase :str=1e-5 ,__lowercase :List[Any]=0.02 ,__lowercase :int=True ,__lowercase :Optional[Any]=5_0_2_5_6 ,__lowercase :Any=5_0_2_5_6 ,__lowercase :Optional[Any]=False ,**__lowercase :Dict ,):
snake_case__ : List[Any] = vocab_size
snake_case__ : Optional[Any] = n_ctx
snake_case__ : Optional[int] = n_positions
snake_case__ : Optional[int] = n_embd
snake_case__ : Any = n_layer
snake_case__ : List[Any] = n_head
snake_case__ : List[str] = n_inner
snake_case__ : Tuple = rotary_dim
snake_case__ : Union[str, Any] = activation_function
snake_case__ : Any = resid_pdrop
snake_case__ : int = embd_pdrop
snake_case__ : Tuple = attn_pdrop
snake_case__ : Dict = layer_norm_epsilon
snake_case__ : int = initializer_range
snake_case__ : Dict = use_cache
snake_case__ : Tuple = bos_token_id
snake_case__ : int = eos_token_id
super().__init__(
bos_token_id=__lowercase ,eos_token_id=__lowercase ,tie_word_embeddings=__lowercase ,**__lowercase )
class a ( __lowerCamelCase ):
def __init__( self :List[Any] ,__lowercase :PretrainedConfig ,__lowercase :str = "default" ,__lowercase :List[PatchingSpec] = None ,__lowercase :bool = False ,):
super().__init__(__lowercase ,task=__lowercase ,patching_specs=__lowercase ,use_past=__lowercase )
if not getattr(self._config ,'''pad_token_id''' ,__lowercase ):
# TODO: how to do that better?
snake_case__ : Dict = 0
@property
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Optional[Any] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__lowercase ,direction='''inputs''' )
snake_case__ : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
snake_case__ : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCamelCase ( self :int ):
return self._config.n_layer
@property
def __lowerCamelCase ( self :Union[str, Any] ):
return self._config.n_head
def __lowerCamelCase ( self :Optional[Any] ,__lowercase :PreTrainedTokenizer ,__lowercase :int = -1 ,__lowercase :int = -1 ,__lowercase :bool = False ,__lowercase :Optional[TensorType] = None ,):
snake_case__ : Optional[int] = super(__lowercase ,self ).generate_dummy_inputs(
__lowercase ,batch_size=__lowercase ,seq_length=__lowercase ,is_pair=__lowercase ,framework=__lowercase )
# We need to order the input in the way they appears in the forward()
snake_case__ : List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case__ : str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case__ : Optional[Any] = seqlen + 2
snake_case__ : Tuple = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case__ : Any = [
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(self.num_layers )
]
snake_case__ : List[Any] = common_inputs['''attention_mask''']
if self.use_past:
snake_case__ : List[Any] = ordered_inputs['''attention_mask'''].dtype
snake_case__ : int = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__lowercase ,__lowercase ,dtype=__lowercase )] ,dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self :Optional[int] ):
return 1_3
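# A sketch of driving the export config above; the classes correspond to
# transformers' public CodeGenConfig / CodeGenOnnxConfig, and the checkpoint
# is one of the Salesforce models from the archive map at the top.
from transformers import AutoTokenizer, CodeGenConfig, TensorType
from transformers.models.codegen import CodeGenOnnxConfig

config = CodeGenConfig.from_pretrained("Salesforce/codegen-350M-mono")
onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy.keys()))  # attention_mask, input_ids, past_key_values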
| 362 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = original_name.split('''.''' )[0]
snake_case__ : List[str] = key.split('''.''' )
snake_case__ : Optional[int] = int(key_list[key_list.index(__lowerCAmelCase ) - 2] )
snake_case__ : Optional[int] = int(key_list[key_list.index(__lowerCAmelCase ) - 1] )
snake_case__ : Any = orig_block_num - offset
snake_case__ : Tuple = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def _lowerCAmelCase ( __lowerCAmelCase ) -> Dict:
"""simple docstring"""
snake_case__ : Optional[int] = OrderedDict()
snake_case__ , snake_case__ : List[str] = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
snake_case__ : int = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
snake_case__ : Tuple = key[: key.find('''proj''' )]
snake_case__ : Union[str, Any] = key.replace(__lowerCAmelCase , f"""patch_embeddings.{total_embed_found}.""" )
snake_case__ : Dict = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
snake_case__ : Optional[int] = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
snake_case__ : Optional[int] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
snake_case__ : Optional[Any] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
snake_case__ : int = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''norm1''' , '''before_norm''' )
if "norm2" in key:
snake_case__ : Tuple = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
snake_case__ : str = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
snake_case__ : Optional[int] = replace_key_with_offset(__lowerCAmelCase , __lowerCAmelCase , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
snake_case__ : Union[str, Any] = key.replace('''head''' , '''classifier''' )
snake_case__ : Union[str, Any] = value
return new_state_dict
def _lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : List[str] = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
@torch.no_grad()
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
snake_case__ : List[str] = PoolFormerConfig()
# set attributes based on model_name
snake_case__ : List[Any] = '''huggingface/label-files'''
snake_case__ : Union[str, Any] = model_name[-3:]
snake_case__ : List[Any] = 1000
snake_case__ : Tuple = '''imagenet-1k-id2label.json'''
snake_case__ : Optional[int] = (1, 1000)
# set config attributes
snake_case__ : Dict = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : Tuple = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
snake_case__ : List[str] = [2, 2, 6, 2]
snake_case__ : Union[str, Any] = [64, 128, 320, 512]
snake_case__ : Optional[int] = 4.0
snake_case__ : Tuple = 0.9
elif size == "s24":
snake_case__ : Tuple = [4, 4, 12, 4]
snake_case__ : Tuple = [64, 128, 320, 512]
snake_case__ : List[Any] = 4.0
snake_case__ : Dict = 0.9
elif size == "s36":
snake_case__ : Optional[Any] = [6, 6, 18, 6]
snake_case__ : str = [64, 128, 320, 512]
snake_case__ : List[Any] = 4.0
snake_case__ : Any = 1E-6
snake_case__ : Any = 0.9
elif size == "m36":
snake_case__ : Any = [6, 6, 18, 6]
snake_case__ : Union[str, Any] = [96, 192, 384, 768]
snake_case__ : Dict = 4.0
snake_case__ : Union[str, Any] = 1E-6
snake_case__ : List[Any] = 0.95
elif size == "m48":
snake_case__ : Optional[int] = [8, 8, 24, 8]
snake_case__ : List[str] = [96, 192, 384, 768]
snake_case__ : str = 4.0
snake_case__ : str = 1E-6
snake_case__ : Any = 0.95
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor
snake_case__ : Optional[Any] = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase )
# Prepare image
snake_case__ : Optional[int] = prepare_img()
snake_case__ : str = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
snake_case__ : List[str] = torch.load(__lowerCAmelCase , map_location=torch.device('''cpu''' ) )
# rename keys
snake_case__ : str = rename_keys(__lowerCAmelCase )
# create HuggingFace model and load state dict
snake_case__ : List[str] = PoolFormerForImageClassification(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Define image processor
snake_case__ : int = PoolFormerImageProcessor(crop_pct=__lowerCAmelCase )
snake_case__ : str = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
snake_case__ : Dict = model(__lowerCAmelCase )
snake_case__ : str = outputs.logits
# define expected logit slices for different models
if size == "s12":
snake_case__ : Tuple = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
snake_case__ : Optional[int] = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
snake_case__ : int = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
snake_case__ : Optional[int] = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
snake_case__ : List[str] = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
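# The block-offset renaming is the subtle part of this conversion; restated
# standalone (the helper above is defined under an obfuscated name) with a
# toy key so the behaviour is visible:
def demo_replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )


print(demo_replace_key_with_offset("poolformer.encoder.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1"))
# -> poolformer.encoder.block.1.1.output.conv1.weight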
| 44 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class snake_case_( a__ ):
__UpperCamelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 60 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class SCREAMING_SNAKE_CASE (datasets.BuilderConfig ):
lowerCAmelCase = None
class SCREAMING_SNAKE_CASE (datasets.ArrowBasedBuilder ):
lowerCAmelCase = PandasConfig
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
__A : Dict = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_UpperCAmelCase , (str, list, tuple)):
__A : Union[str, Any] = data_files
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : Optional[Any] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
__A : Tuple = []
for split_name, files in data_files.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
__A : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__A : Optional[Any] = [dl_manager.iter_files(_UpperCAmelCase) for file in files]
splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files}))
return splits
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__A : List[str] = table_cast(_UpperCAmelCase , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase)):
with open(_UpperCAmelCase , 'rb') as f:
__A : Optional[int] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase))
                yield i, self._cast_table(_UpperCAmelCase)
| 190 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
a__: List[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
a__: str = logging.WARNING
def UpperCamelCase__( )->Dict:
A__ = os.getenv('''DATASETS_VERBOSITY''' , UpperCamelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def UpperCamelCase__( )->str:
return __name__.split('''.''' )[0]
def UpperCamelCase__( )->logging.Logger:
return logging.getLogger(_get_library_name() )
def UpperCamelCase__( )->None:
# Apply our default configuration to the library root logger.
A__ = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase__( )->None:
A__ = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCamelCase__( UpperCamelCase__ : Optional[str] = None )->logging.Logger:
if name is None:
A__ = _get_library_name()
return logging.getLogger(UpperCamelCase__ )
def UpperCamelCase__( )->int:
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase__( UpperCamelCase__ : int )->None:
_get_library_root_logger().setLevel(UpperCamelCase__ )
def UpperCamelCase__( )->List[Any]:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->Optional[Any]:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->Dict:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->Any:
return set_verbosity(UpperCamelCase__ )
def UpperCamelCase__( )->None:
A__ = False
def UpperCamelCase__( )->None:
A__ = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class SCREAMING_SNAKE_CASE__ :
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ): # pylint: disable=unused-argument
A__ = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self,__lowerCamelCase ):
def empty_fn(*__lowerCamelCase,**__lowerCamelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
return
a__: Dict = True
class SCREAMING_SNAKE_CASE__ :
def __call__( self,*__lowerCamelCase,__lowerCamelCase=False,**__lowerCamelCase ):
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__lowerCamelCase,**__lowerCamelCase )
else:
return EmptyTqdm(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
A__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
a__: str = _tqdm_cls()
def UpperCamelCase__( )->bool:
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase__( )->Optional[Any]:
global _tqdm_active
A__ = True
def UpperCamelCase__( )->int:
global _tqdm_active
A__ = False
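# The helpers above mirror `datasets.utils.logging`; through the public
# package the same machinery is exercised like this (assuming the `datasets`
# package is installed):
from datasets import logging as ds_logging

ds_logging.set_verbosity_info()
logger = ds_logging.get_logger(__name__)
logger.info("visible at INFO verbosity")
ds_logging.disable_progress_bar()  # turns the tqdm wrapper into a no-op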
| 39 |
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # the first Sunday of 1901 is 6 January
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
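# An independent check of the count above using the standard library instead
# of manual calendar arithmetic:
from datetime import date


def sundays_via_datetime() -> int:
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
    )


# Both approaches agree: sundays_via_datetime() == solution() == 171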
| 39 | 1 |
"""simple docstring"""
from math import isqrt, loga
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> list[int]:
'''simple docstring'''
lowercase_ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = False
return [i for i in range(2 , __lowerCAmelCase ) if is_prime[i]]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 80_08_00 , __lowerCAmelCase = 80_08_00 ) -> int:
'''simple docstring'''
lowercase_ = degree * loga(__lowerCAmelCase )
lowercase_ = int(__lowerCAmelCase )
lowercase_ = calculate_prime_numbers(__lowerCAmelCase )
lowercase_ = 0
lowercase_ = 0
lowercase_ = len(__lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 136 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase : Union[str, Any] = TypeVar("T")
UpperCAmelCase : Dict = Union[List[T], Tuple[T, ...]]
UpperCAmelCase : int = Union[T, List[T], Dict[str, T]]
UpperCAmelCase : Tuple = Union[str, bytes, os.PathLike]
| 136 | 1 |
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
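# The same tokenizer driven from the released checkpoint rather than the toy
# vocabulary above (assumes network access to the Hugging Face Hub):
from transformers import AutoTokenizer

phobert = AutoTokenizer.from_pretrained("vinai/phobert-base")
ids = phobert.encode("Tôi là sinh viên")
print(phobert.convert_ids_to_tokens(ids))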
| 356 |
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 45 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path: str = "", prefix: str = "train") -> None:
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self) -> int:
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary lines from a CNN/DailyMail story file;
    summary lines are the ones introduced by "@highlight" markers."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt a sequence to the block size: truncate if it is longer, pad with
    pad_token_id if it is shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 everywhere except on padding positions."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token
    sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings: alternate 0s and 1s for successive sentences,
    switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
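# A toy walk-through of `process_story` on an in-memory story string shaped
# like the CNN/DailyMail files this module expects:
raw = "It was a quiet day\n\nNothing happened\n\n@highlight\n\nquiet day"
story, summary = process_story(raw)
print(story)  # ['It was a quiet day.', 'Nothing happened.']
print(summary)  # ['quiet day.']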
| 47 |
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
def A ( cls : Optional[Any] , _a : Optional[Any] , _a : Any ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =prefix
_SCREAMING_SNAKE_CASE =defaults
cls.build_naming_info()
@staticmethod
def A ( _a : Optional[Any] , _a : List[Any] ) -> Any:
'''simple docstring'''
if len(_a ) == 0:
return ""
_SCREAMING_SNAKE_CASE =None
if any(char.isdigit() for char in word ):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_a ) + 1 ):
_SCREAMING_SNAKE_CASE =word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE =prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_a : str ):
_SCREAMING_SNAKE_CASE =''
while integer != 0:
_SCREAMING_SNAKE_CASE =chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
_SCREAMING_SNAKE_CASE =0
while True:
_SCREAMING_SNAKE_CASE =word + '#' + int_to_alphabetic(_a )
if sword in info["reverse_short_word"]:
continue
else:
_SCREAMING_SNAKE_CASE =sword
break
_SCREAMING_SNAKE_CASE =short_word
_SCREAMING_SNAKE_CASE =word
return short_word
@staticmethod
def A ( _a : Optional[Any] , _a : int ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =param_name.split('_' )
_SCREAMING_SNAKE_CASE =[TrialShortNamer.shortname_for_word(_a , _a ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_SCREAMING_SNAKE_CASE =['', '_']
for separator in separators:
_SCREAMING_SNAKE_CASE =separator.join(_a )
if shortname not in info["reverse_short_param"]:
_SCREAMING_SNAKE_CASE =shortname
_SCREAMING_SNAKE_CASE =param_name
return shortname
return param_name
@staticmethod
def A ( _a : Dict , _a : int ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TrialShortNamer.shortname_for_key(_a , _a )
_SCREAMING_SNAKE_CASE =short_name
_SCREAMING_SNAKE_CASE =param_name
@classmethod
def A ( cls : Optional[int] ) -> Tuple:
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_SCREAMING_SNAKE_CASE ={
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
_SCREAMING_SNAKE_CASE =list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_a , _a )
_SCREAMING_SNAKE_CASE =info
@classmethod
def A ( cls : List[Any] , _a : int ) -> int:
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_SCREAMING_SNAKE_CASE =[copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_SCREAMING_SNAKE_CASE =cls.NAMING_INFO['short_param'][k]
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =1 if v else 0
_SCREAMING_SNAKE_CASE ='' if isinstance(_a , (int, float) ) else '-'
_SCREAMING_SNAKE_CASE =f"{key}{sep}{v}"
name.append(_a )
return "_".join(_a )
@classmethod
def A ( cls : Optional[Any] , _a : List[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_SCREAMING_SNAKE_CASE =[]
else:
_SCREAMING_SNAKE_CASE =repr.split('_' )
_SCREAMING_SNAKE_CASE ={}
for value in values:
if "-" in value:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =value.split('-' )
else:
_SCREAMING_SNAKE_CASE =re.sub('[0-9.]' , '' , _a )
_SCREAMING_SNAKE_CASE =float(re.sub('[^0-9.]' , '' , _a ) )
_SCREAMING_SNAKE_CASE =cls.NAMING_INFO['reverse_short_param'][p_k]
_SCREAMING_SNAKE_CASE =p_v
for k in cls.DEFAULTS:
if k not in parameters:
_SCREAMING_SNAKE_CASE =cls.DEFAULTS[k]
return parameters
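# A usage sketch for the namer above: declare a subclass with a prefix and
# defaults, then round-trip a hyperparameter dict through its short run name.
# The hyperparameter names are invented, and the method names (`shortname`,
# `parse_repr`) are the ones this class carries in transformers.
class DemoNamer(TrialShortNamer):
    PREFIX = "demo"
    DEFAULTS = {"learning_rate": 0.1, "batch_size": 32}


name = DemoNamer.shortname({"learning_rate": 0.05, "batch_size": 32})
print(name)  # demo_lr0.05  (batch_size kept its default, so it is omitted)
print(DemoNamer.parse_repr(name))  # {'learning_rate': 0.05, 'batch_size': 32}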
| 47 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def parse_bool(string):
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"""could not parse string as bool {string}""")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 322 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self) -> None:
'''simple docstring'''
_UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("CPU" , font_size=24 )
_UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("GPU" , font_size=24 )
_UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
_UpperCAmelCase : int = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.4_6 / 4
_UpperCAmelCase : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 322 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
UpperCAmelCase : Any = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
UpperCAmelCase : Union[str, Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
UpperCAmelCase : List[Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
UpperCAmelCase : List[Any] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
UpperCAmelCase : str = InstructBlipConfig(vision_config=_snake_case , text_config=_snake_case , qformer_config=_snake_case )
return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
UpperCAmelCase : Any = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
UpperCAmelCase : Optional[Any] = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCAmelCase : List[Any] = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
UpperCAmelCase : Optional[int] = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
UpperCAmelCase : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCAmelCase : str = "cuda:1" if torch.cuda.is_available() else "cpu"
UpperCAmelCase : Optional[int] = "cuda:2" if torch.cuda.is_available() else "cpu"
UpperCAmelCase : List[str] = load_model_and_preprocess(
name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCAmelCase : str = original_model.state_dict()
UpperCAmelCase : Optional[int] = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case , _snake_case , _snake_case )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase : int = state_dict.pop(_snake_case )
if key.startswith("""Qformer.bert""" ):
UpperCAmelCase : Dict = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCAmelCase : str = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
UpperCAmelCase : int = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCAmelCase : Dict = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
UpperCAmelCase : Optional[int] = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
UpperCAmelCase : Optional[int] = key.replace("""t5""" , """language""" )
UpperCAmelCase : Union[str, Any] = val
# read in qv biases
read_in_q_v_bias(_snake_case , _snake_case )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_snake_case , strict=_snake_case )
UpperCAmelCase : Union[str, Any] = load_demo_image()
UpperCAmelCase : Tuple = "What is unusual about this image?"
# create processor
UpperCAmelCase : str = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_snake_case , image_std=_snake_case )
UpperCAmelCase : Tuple = InstructBlipProcessor(
image_processor=_snake_case , tokenizer=_snake_case , qformer_tokenizer=_snake_case , )
UpperCAmelCase : List[str] = processor(images=_snake_case , text=_snake_case , return_tensors="""pt""" ).to(_snake_case )
# make sure processor creates exact same pixel values
UpperCAmelCase : List[Any] = vis_processors["eval"](_snake_case ).unsqueeze(0 ).to(_snake_case )
UpperCAmelCase : Optional[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _snake_case )
original_model.to(_snake_case )
hf_model.to(_snake_case )
with torch.no_grad():
if "vicuna" in model_name:
UpperCAmelCase : Tuple = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
UpperCAmelCase : List[Any] = hf_model(**_snake_case ).logits
else:
UpperCAmelCase : int = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
UpperCAmelCase : Dict = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(_snake_case )
UpperCAmelCase : Optional[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
UpperCAmelCase : Optional[Any] = hf_model(**_snake_case , labels=_snake_case ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCAmelCase : List[str] = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , _snake_case , atol=_snake_case )
print("""Looks ok!""" )
print("""Generating with original model...""" )
UpperCAmelCase : Dict = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
UpperCAmelCase : Optional[int] = hf_model.generate(
**_snake_case , do_sample=_snake_case , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCAmelCase : Tuple = 2
print("""Original generation:""" , _snake_case )
UpperCAmelCase : Dict = processor.batch_decode(_snake_case , skip_special_tokens=_snake_case )
UpperCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _snake_case )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if push_to_hub:
processor.push_to_hub(F"Salesforce/{model_name}" )
hf_model.push_to_hub(F"Salesforce/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
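# --- Illustrative addition (not part of the original conversion script) ---
# A minimal sketch of how a converted checkpoint could be used for inference.
# The local path below is an assumption: it is whatever --pytorch_dump_folder_path
# was passed to the conversion above.
def _demo_converted_model(checkpoint="./instructblip-flan-t5-xl"):
    from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained(checkpoint)
    model = InstructBlipForConditionalGeneration.from_pretrained(checkpoint)
    image = load_demo_image()  # reuses the helper defined earlier in this script
    inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    generated = model.generate(**inputs, num_beams=5, max_length=256)
    print(processor.batch_decode(generated, skip_special_tokens=True))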
| 109 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Map original timm parameter names to their HF ViT hybrid counterparts."""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
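# --- Illustrative addition (not part of the original conversion script) ---
# A hedged sketch of running the converted classifier. The checkpoint path is
# an assumption: it is whatever --pytorch_dump_folder_path pointed to above.
def _demo_converted_vit(checkpoint="./vit-hybrid-base-bit-384"):
    image = prepare_img()
    processor = ViTHybridImageProcessor.from_pretrained(checkpoint)
    model = ViTHybridForImageClassification.from_pretrained(checkpoint)
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])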
| 281 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
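# --- Illustrative addition (not part of the original script) ---
# A minimal sketch showing how the pickled token ids written above could be
# read back. The default file name mirrors the default --dump_file and
# --tokenizer_name arguments, so it is an assumption about your actual run.
def _load_binarized(path="data/dump.bert-base-uncased.pickle"):
    with open(path, "rb") as handle:
        sequences = pickle.load(handle)  # list of numpy integer arrays, one per line
    print(f"{len(sequences)} tokenized sequences loaded")
    return sequences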
| 103 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(default=0.6, metadata={"help": "Percentage of patches to mask."})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates a boolean mask over image patches for the SimMIM pretraining task."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        })
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )

    def preprocess_images(examples):
        """Apply the transforms to a batch of images and attach a freshly sampled patch mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
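# --- Illustrative addition (not part of the original example script) ---
# A small sketch of what MaskGenerator produces with its defaults: a 192px
# input, 32px mask patches and 4px model patches give (192/4)**2 = 2304 mask
# entries, of which ceil(36 * 0.6) / 36 (about 61%) are ones.
def _demo_mask_generator():
    generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = generator()
    print(mask.shape, mask.float().mean().item())  # torch.Size([2304]) ~0.61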
| 103 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : str = [1]
__lowercase ,__lowercase ,__lowercase : List[str] = 0, 0, 0
__lowercase : List[str] = ugly_nums[ia] * 2
__lowercase : Any = ugly_nums[ia] * 3
__lowercase : str = ugly_nums[ia] * 5
for _ in range(1 , __UpperCamelCase ):
__lowercase : Union[str, Any] = min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ugly_nums.append(__UpperCamelCase )
if next_num == next_a:
ia += 1
__lowercase : List[str] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__lowercase : int = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__lowercase : Optional[int] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_0_0) = }")
| 249 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
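# --- Illustrative addition (not part of this module) ---
# A hedged sketch of how the template maps custom column names back to the
# canonical schema. This module uses relative imports, so to actually run the
# snippet, copy it into a separate script with `datasets` installed.
if __name__ == "__main__":
    from datasets.tasks import QuestionAnsweringExtractive as _QA  # packaged version of this class

    template = _QA(question_column="q", context_column="ctx", answers_column="ans")
    print(template.column_mapping)  # {'q': 'question', 'ctx': 'context', 'ans': 'answers'}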
| 249 | 1 |
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n (Project Euler 29)."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(input().strip())))
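    # --- Illustrative addition (not in the original): the worked example from
    # the problem statement. For n = 5, 4**2 == 2**4 is the only collision
    # among the 16 products, leaving 15 distinct terms.
    assert solution(5) == 15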
| 16 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
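# --- Illustrative addition (not part of this module) ---
# A hedged sketch of encoding one example; copy into a separate script with
# `datasets` installed, since this module uses relative imports. The import
# path below is an assumption about where the class is re-exported.
if __name__ == "__main__":
    from datasets.features import TranslationVariableLanguages as _TVL

    feature = _TVL(languages=["de", "en", "fr"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    print(encoded)  # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}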
| 16 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights to our Audio Spectrogram Transformer structure."""
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
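# --- Illustrative addition (not part of the original conversion script) ---
# A hedged sketch of classifying audio with a converted checkpoint. The hub id
# below matches the `MIT/{model_name}` naming used above; any local
# --pytorch_dump_folder_path output would work the same way.
def _demo_converted_ast(checkpoint="MIT/ast-finetuned-audioset-10-10-0.4593"):
    import numpy as np

    extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
    model = ASTForAudioClassification.from_pretrained(checkpoint)
    dummy = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = extractor(dummy, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        predicted = model(**inputs).logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted])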
| 72 |
"""simple docstring"""
def lowercase (snake_case__ : list ) -> list:
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
lowerCAmelCase = []
def generate(snake_case__ : int , snake_case__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[i]
else: # k is odd
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
if __name__ == "__main__":
a = input('Enter numbers separated by a comma:\n').strip()
a = [int(item) for item in user_input.split(',')]
print(heaps(arr))
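    # --- Illustrative addition (not in the original): cross-check against the
    # standard library. Heap's algorithm emits permutations in a different
    # order than itertools.permutations, so compare as sets.
    from itertools import permutations

    assert set(heaps([1, 2, 3])) == set(permutations([1, 2, 3]))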
| 155 | 0 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a__ = (0, 0)
a__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a__ = time.time()
a__ = BreadthFirstSearch(init, goal)
a__ = bfs.search()
a__ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
a__ = time.time()
a__ = BidirectionalBreadthFirstSearch(init, goal)
a__ = bd_bfs.search()
a__ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
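    # --- Illustrative addition (not in the original): on this grid a path
    # exists, so both searches should return routes with the same endpoints.
    assert path is not None and bd_path is not None
    assert path[0] == bd_path[0] == init and path[-1] == bd_path[-1] == goal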
| 15 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
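    # --- Illustrative addition (not in the original): cross-check the helper
    # against Python's built-in three-argument pow(); 3**7 = 2187, so both
    # should give 187 modulo 1000.
    assert _modexpt(3, 7, 1000) == pow(3, 7, 1000) == 187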
| 15 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
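
    # The three call-tests above exercise the same resize + center-crop path; only
    # the input container differs (PIL, numpy, torch), and every output is an
    # 18x18 crop per the tester's `crop_size`.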
| 154 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
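
# Illustration of the renaming above (hypothetical checkpoint keys):
#   "blocks.group_1.conv.w" -> "blocks.group_1.group.conv.weight"
#   "blocks.res_path.0.b"   -> "blocks.res_path.path.0.bias"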
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
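
    # Example invocation (script and file names assumed):
    #   python convert_dalle_to_flava_codebook.py --checkpoint_path ./encoder.pkl \
    #       --pytorch_dump_folder_path ./flava-image-codebook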
| 154 | 1 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping point triplet `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image (the pairings passed to get_rotation
    # below are an assumed reconstruction; any distinct pairs work)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 352 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
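
# Illustration (hypothetical keys):
#   "patch_embed.0.weight"      -> "swiftformer.patch_embed.patch_embedding.0.weight"
#   "network.0.1.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight"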
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
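
    # Example invocation (script name assumed):
    #   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/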
| 227 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
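
# Illustration (hypothetical checkpoint keys):
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"
#   "backbone.block1.0.attn.q.weight"   -> "segformer.encoder.block.0.0.attention.self.query.weight"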
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
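
# The fused `kv` matrix has shape (2 * hidden_size, hidden_size): the first
# hidden_size rows become the key projection and the remaining rows the value
# projection, matching the slices above.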
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372e01, -1.2_787e01, -1.3_477e01],
[-1.2_536e01, -1.4_194e01, -1.4_409e01],
[-1.3_217e01, -1.4_888e01, -1.5_327e01],
],
[
[-1.4_791e01, -1.7_122e01, -1.8_277e01],
[-1.7_163e01, -1.9_192e01, -1.9_533e01],
[-1.7_897e01, -1.9_991e01, -2.0_315e01],
],
[
[7.6_723e-01, 4.1_921e-01, -7.7_878e-02],
[4.7_772e-01, 9.5_557e-03, -2.8_082e-01],
[3.6_032e-01, -2.4_826e-01, -5.1_168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
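
    # Example invocation (script name assumed):
    #   python convert_segformer_original_to_pytorch.py --model_name segformer.b0.512x512.ade.160k \
    #       --checkpoint_path ./segformer_b0.pth --pytorch_dump_folder_path ./segformer-b0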
| 165 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Compute the missing one of voltage, current, or power from P = V * I;
    exactly one of the three arguments must be passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
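
# Example (illustrative values): electric_power(voltage=0, current=2, power=5)
# returns result(name='voltage', value=2.5), i.e. V = P / I.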
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 368 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra parameters for weight pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
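
# Minimal usage sketch (assumed): configure top-K magnitude pruning for a run
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)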
| 87 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed/compiled containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Block until all processes have reached this point in the script."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to `f`, TPU-aware and only on the main local process."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables inside the context."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
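
# Usage sketch for the context manager above (values assumed):
#   with patch_environment(cuda_visible_devices="0"):
#       ...  # CUDA_VISIBLE_DEVICES is set only inside this block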
def get_pretty_name(obj):
    """Return a human-readable name for `obj` (qualname, name, or str fallback)."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on localhost (defaults to 29500)."""
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
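
# Example (assumed): check whether the default torch.distributed port is free
#   if is_port_in_use(29500):
#       print("Port 29500 is already taken")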
| 75 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
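
# Note: patch_submodule patches attribute *paths* on the target module, so even
# renamed imports (`from os.path import join as renamed_join`) are covered, as
# the first test above demonstrates.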
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player (maximizer if `is_max`),
    alternating max and min levels down to the leaf scores."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
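
    # With 8 leaf scores the tree height is log2(8) = 3 and the root is a MAX
    # level, so the call above prints 65 for this scores list.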
| 353 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size=None, length=None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ-Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict: bool = True,
        callback=None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0), i.e. -inf) the lowest-probability entries whose
        cumulative mass exceeds `truncation_rate`, always keeping the largest one."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
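
# Minimal usage sketch (checkpoint id assumed):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]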
| 242 | 0 |
class OverFlowError( __lowercase):
    '''simple docstring'''
    pass
class UnderFlowError( __lowercase):
    '''simple docstring'''
    pass
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Any ) -> Optional[Any]:
__UpperCamelCase : str = [
[],
[],
[],
]
def _lowerCamelCase ( self :Dict , a :int , a :int ) -> None:
try:
if len(self.queues[priority] ) >= 1_0_0:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(a )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self :Optional[int] ) -> str:
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :int ) -> str:
__UpperCamelCase : Any = []
def _lowerCamelCase ( self :Any , a :int ) -> None:
if len(self.queue ) == 1_0_0:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(a )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
__UpperCamelCase : List[Any] = min(self.queue )
self.queue.remove(a )
return data
def __str__( self :Tuple ) -> str:
return str(self.queue )
def _SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
__UpperCamelCase : Dict = FixedPriorityQueue()
fpq.enqueue(0 , 10)
fpq.enqueue(1 , 70)
fpq.enqueue(0 , 100)
fpq.enqueue(2 , 1)
fpq.enqueue(2 , 5)
fpq.enqueue(1 , 7)
fpq.enqueue(2 , 4)
fpq.enqueue(1 , 64)
fpq.enqueue(0 , 128)
print(_lowerCamelCase)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(_lowerCamelCase)
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
print(fpq.dequeue())
def _SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
__UpperCamelCase : Any = ElementPriorityQueue()
epq.enqueue(10)
epq.enqueue(70)
epq.enqueue(100)
epq.enqueue(1)
epq.enqueue(5)
epq.enqueue(7)
epq.enqueue(4)
epq.enqueue(64)
epq.enqueue(128)
print(_lowerCamelCase)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(_lowerCamelCase)
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
    element_priority_queue()
| 232 |
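The two queues above dequeue their minimum with a linear `min()`/`remove()` scan. For contrast, a sketch of the same element-priority behavior on the standard library's `heapq`, which makes both operations O(log n); the class name mirrors the file's size cap but is otherwise illustrative:

import heapq

class HeapPriorityQueue:
    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self._heap) == 100:
            raise OverflowError("Maximum queue size is 100")
        heapq.heappush(self._heap, data)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)

q = HeapPriorityQueue()
for value in (10, 70, 1):
    q.enqueue(value)
print(q.dequeue())  # 1: smallest element comes out first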
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :List[Any] , a :Dict , a :Any=3 , a :Any=3_2 , a :Optional[Any]=3 , a :str=1_0 , a :Union[str, Any]=[1_0, 2_0, 3_0, 4_0] , a :Optional[Any]=[1, 1, 2, 1] , a :Optional[Any]=True , a :Dict=True , a :Tuple="relu" , a :List[str]=3 , a :Tuple=None , ) -> Tuple:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : Dict = batch_size
__UpperCamelCase : int = image_size
__UpperCamelCase : Dict = num_channels
__UpperCamelCase : Optional[int] = embeddings_size
__UpperCamelCase : List[Any] = hidden_sizes
__UpperCamelCase : Optional[Any] = depths
__UpperCamelCase : Optional[int] = is_training
__UpperCamelCase : Union[str, Any] = use_labels
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : Tuple = num_labels
__UpperCamelCase : Tuple = scope
__UpperCamelCase : Dict = len(a )
def _lowerCamelCase ( self :Optional[int] ) -> Any:
__UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _lowerCamelCase ( self :List[Any] , a :Dict , a :int , a :Optional[Any] ) -> Tuple:
__UpperCamelCase : str = TFResNetModel(config=a )
__UpperCamelCase : Union[str, Any] = model(a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _lowerCamelCase ( self :Union[str, Any] , a :Optional[int] , a :List[str] , a :Optional[Any] ) -> Any:
__UpperCamelCase : str = self.num_labels
__UpperCamelCase : Optional[int] = TFResNetForImageClassification(a )
__UpperCamelCase : List[str] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
__UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = config_and_inputs
__UpperCamelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase):
'''simple docstring'''
_A = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_A = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def _lowerCamelCase ( self :int ) -> List[str]:
__UpperCamelCase : Union[str, Any] = TFResNetModelTester(self )
__UpperCamelCase : List[Any] = ConfigTester(self , config_class=a , has_text_modality=a )
def _lowerCamelCase ( self :int ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self :str ) -> Optional[Any]:
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def _lowerCamelCase ( self :Tuple ) -> Tuple:
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
pass
def _lowerCamelCase ( self :Optional[int] ) -> Tuple:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Dict = model_class(a )
__UpperCamelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Dict = [*signature.parameters.keys()]
__UpperCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self :List[str] ) -> List[str]:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self :Optional[Any] ) -> Tuple:
def check_hidden_states_output(a :Optional[Any] , a :Optional[int] , a :List[str] ):
__UpperCamelCase : int = model_class(a )
__UpperCamelCase : int = model(**self._prepare_for_class(a , a ) )
__UpperCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCamelCase : int = layer_type
__UpperCamelCase : int = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase : int = True
check_hidden_states_output(a , a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Dict:
__UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self :Dict ) -> Dict:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[Any] = TFResNetModel.from_pretrained(a )
self.assertIsNotNone(a )
def _SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self :Optional[Any] ) -> Tuple:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]:
__UpperCamelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__UpperCamelCase : List[Any] = self.default_image_processor
__UpperCamelCase : List[str] = prepare_img()
__UpperCamelCase : List[str] = image_processor(images=a , return_tensors="tf" )
# forward pass
__UpperCamelCase : Dict = model(**a )
# verify the logits
__UpperCamelCase : Dict = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
__UpperCamelCase : Union[str, Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1E-4 ) )
| 232 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[int] = "▁"
_lowercase : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"}
_lowercase : Union[str, Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
_lowercase : Any = {
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
_lowercase : List[str] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase_ : Any = legacy_behaviour
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowercase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : Any = 1
lowercase_ : Any = len(self.sp_model )
lowercase_ : Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
lowercase_ : str = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase_ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase_ : Any = src_lang if src_lang is not None else '''eng_Latn'''
lowercase_ : Any = self.lang_code_to_id[self._src_lang]
lowercase_ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.__dict__.copy()
lowercase_ : Union[str, Any] = None
lowercase_ : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : Union[str, Any] = {}
lowercase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = [1] * len(self.prefix_tokens )
lowercase_ : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
lowercase_ : str = [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Optional[int] = src_lang
lowercase_ : Dict = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : int = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Union[str, Any] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Any = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "eng_Latn" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "fra_Latn" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = src_lang
lowercase_ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase_ : Any = []
lowercase_ : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : str = [self.cur_lang_code]
lowercase_ : Optional[Any] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase_ : Tuple = []
lowercase_ : int = [self.eos_token_id, self.cur_lang_code]
else:
lowercase_ : Any = [self.cur_lang_code]
lowercase_ : str = [self.eos_token_id]
| 264 |
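The token-to-id path above pins the first four ids to fairseq's special tokens and shifts every real SentencePiece id by `fairseq_offset`, exactly as the alignment comment in `__init__` lays out. A toy illustration with made-up vocabularies:

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, "an": 3, "▁n": 4}

def token_to_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 is <unk>, so fall back to the fairseq unk id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(token_to_id("an"))   # 4: spm id 3 + offset 1
print(token_to_id("<s>"))  # 0: pinned fairseq special token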
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = checkpoints.load_tax_checkpoint(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = flatten_dict(__SCREAMING_SNAKE_CASE )
return flax_params
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : int = {}
lowercase_ : Any = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowercase_ : Tuple = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase_ : Tuple = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase_ : Optional[Any] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase_ : Optional[Any] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase_ : List[Any] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase_ : str = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = flax_dict[key]
lowercase_ : Any = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase_ : str = torch.from_numpy(converted_dict[key].T )
else:
lowercase_ : str = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
"""simple docstring"""
lowercase_ : List[str] = get_flax_param(__SCREAMING_SNAKE_CASE )
if not use_large:
lowercase_ : List[str] = PixaStructVisionConfig()
lowercase_ : Optional[Any] = PixaStructTextConfig()
else:
lowercase_ : Optional[int] = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowercase_ : Dict = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowercase_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = PixaStructForConditionalGeneration(__SCREAMING_SNAKE_CASE )
lowercase_ : int = rename_and_convert_flax_params(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : str = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowercase_ : List[Any] = PixaStructImageProcessor()
lowercase_ : int = PixaStructProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
if use_large:
lowercase_ : Tuple = 4096
lowercase_ : Optional[int] = True
# mkdir if needed
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
print('''Model saved in {}'''.format(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_lowercase : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 264 | 1 |
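The conversion rests on two small mechanics: regex-renaming layer keys and transposing most weight matrices when moving from Flax's (in, out) layout to PyTorch's (out, in). A stripped-down sketch; the key and mapping are illustrative:

import re
import numpy as np
import torch

def convert_key(key: str) -> str:
    key = re.sub(r"layers_(\d+)", r"layer.\1", key)
    return key.replace("kernel", "weight")

flax_params = {"encoder.layers_0.attention.kernel": np.ones((2, 3), dtype=np.float32)}
torch_state = {
    convert_key(k): torch.from_numpy(v.T)  # Flax stores (in, out); PyTorch expects (out, in)
    for k, v in flax_params.items()
}
print(list(torch_state))                                      # ['encoder.layer.0.attention.weight']
print(torch_state["encoder.layer.0.attention.weight"].shape)  # torch.Size([3, 2])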
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __UpperCAmelCase ( UpperCAmelCase_ : list[list[float]] ) -> list[list[float]]:
'''simple docstring'''
__snake_case : str = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(UpperCAmelCase_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__snake_case : Dict = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
__snake_case : Union[str, Any] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case : Tuple = matrix[1][1], matrix[0][0]
__snake_case , __snake_case : str = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCAmelCase_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCAmelCase_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case : str = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
__snake_case : Dict = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__snake_case : int = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__snake_case : List[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__snake_case : List[str] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__snake_case : Optional[int] = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__snake_case : Tuple = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__snake_case : Dict = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__snake_case : Union[str, Any] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__snake_case : Tuple = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__snake_case : Any = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case : Optional[Any] = array(UpperCAmelCase_ )
for i in range(3 ):
for j in range(3 ):
__snake_case : List[str] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case : Optional[int] = array(UpperCAmelCase_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(UpperCAmelCase_ )
# Calculate the inverse of the matrix
return [[float(d(UpperCAmelCase_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 172 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : str= {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any= [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any= [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_a : Any= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 172 | 1 |
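This lazy-module block (and the identical table-transformer one later in this dump) defers the heavy framework imports until an attribute is first touched. Outside transformers' private `_LazyModule` helper, the same effect can be approximated with a PEP 562 module-level `__getattr__`; a rough sketch using a stand-in import structure:

import importlib

_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name: str):
    # only runs when the attribute is not already defined in this module
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")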
"""simple docstring"""
_snake_case = [0, 2, 4, 6, 8]
_snake_case = [1, 3, 5, 7, 9]
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__UpperCAmelCase : Optional[int] = 0
for digit in range(10 ):
__UpperCAmelCase : Optional[int] = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, snake_case__, snake_case__ )
return result
__UpperCAmelCase : Tuple = 0
for digita in range(10 ):
__UpperCAmelCase : Union[str, Any] = digita
if (remainder + digita) % 2 == 0:
__UpperCAmelCase : List[str] = ODD_DIGITS
else:
__UpperCAmelCase : Any = EVEN_DIGITS
for digita in other_parity_digits:
__UpperCAmelCase : List[Any] = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, snake_case__, snake_case__, )
return result
def _UpperCamelCase ( snake_case__ = 9 ) -> int:
__UpperCAmelCase : Union[str, Any] = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(snake_case__, 0, [0] * length, snake_case__ )
return result
if __name__ == "__main__":
print(F'{solution() = }')
| 369 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = 384
if "tiny" in model_name:
__UpperCAmelCase : Union[str, Any] = [3, 3, 9, 3]
__UpperCAmelCase : List[Any] = [96, 192, 384, 768]
if "small" in model_name:
__UpperCAmelCase : Tuple = [3, 3, 27, 3]
__UpperCAmelCase : Any = [96, 192, 384, 768]
if "base" in model_name:
__UpperCAmelCase : str = [3, 3, 27, 3]
__UpperCAmelCase : str = [128, 256, 512, 1024]
__UpperCAmelCase : str = 512
if "large" in model_name:
__UpperCAmelCase : Dict = [3, 3, 27, 3]
__UpperCAmelCase : int = [192, 384, 768, 1536]
__UpperCAmelCase : Dict = 768
if "xlarge" in model_name:
__UpperCAmelCase : List[Any] = [3, 3, 27, 3]
__UpperCAmelCase : Tuple = [256, 512, 1024, 2048]
__UpperCAmelCase : int = 1024
# set label information
__UpperCAmelCase : List[Any] = 150
__UpperCAmelCase : str = "huggingface/label-files"
__UpperCAmelCase : List[Any] = "ade20k-id2label.json"
__UpperCAmelCase : str = json.load(open(hf_hub_download(snake_case__, snake_case__, repo_type="dataset" ), "r" ) )
__UpperCAmelCase : str = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : int = ConvNextConfig(
depths=snake_case__, hidden_sizes=snake_case__, out_features=["stage1", "stage2", "stage3", "stage4"] )
__UpperCAmelCase : int = UperNetConfig(
backbone_config=snake_case__, auxiliary_in_channels=snake_case__, num_labels=snake_case__, idalabel=snake_case__, labelaid=snake_case__, )
return config
def _UpperCamelCase ( snake_case__ ) -> Tuple:
__UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Any:
__UpperCAmelCase : Union[str, Any] = dct.pop(snake_case__ )
__UpperCAmelCase : Optional[int] = val
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Dict = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
__UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(snake_case__, map_location="cpu" )["state_dict"]
__UpperCAmelCase : Dict = get_upernet_config(snake_case__ )
__UpperCAmelCase : str = UperNetForSemanticSegmentation(snake_case__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : str = state_dict.pop(snake_case__ )
if "bn" in key:
__UpperCAmelCase : int = key.replace("bn", "batch_norm" )
__UpperCAmelCase : Union[str, Any] = val
# rename keys
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
model.load_state_dict(snake_case__ )
# verify on image
__UpperCAmelCase : int = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : Optional[int] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
__UpperCAmelCase : str = SegformerImageProcessor()
__UpperCAmelCase : Any = processor(snake_case__, return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(snake_case__ )
if model_name == "upernet-convnext-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCAmelCase : Dict = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCAmelCase : Tuple = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:", outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3], snake_case__, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 342 | 0 |
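Like the Pix2Struct script earlier, this converter ends with a fixed-input sanity check: run a known image through the ported model and compare a logits slice against hard-coded reference values within a tolerance. The bare pattern, with placeholder tensors rather than real model outputs:

import torch

reference = torch.tensor([[-8.8110, -8.8110, -8.6521]])  # placeholder reference slice
ported = reference + 1e-5                                 # stand-in for the ported model's logits slice
assert torch.allclose(ported, reference, atol=1e-4)
print("Looks ok!")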
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283 | 1 |
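The nested-query test above encodes the processor's batching rule as arithmetic: every image's query list is brought up to the longest one, so `input_ids` has `batch_size * num_max_text_queries` rows of the fixed sequence length. Restated on the test's own inputs:

input_texts = [["cat", "nasa badge"], ["person"]]
batch_size = len(input_texts)                                    # 2
num_max_text_queries = max(len(texts) for texts in input_texts)  # 2
seq_length = 16                                                  # fixed CLIP-style length used by the test
print((batch_size * num_max_text_queries, seq_length))           # (4, 16): expected input_ids shape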
"""simple docstring"""
def _snake_case ( lowercase__ ):
if collection == []:
return []
# get some information about the collection
_lowerCamelCase : Optional[int] = len(UpperCamelCase__ )
_lowerCamelCase : List[Any] = max(UpperCamelCase__ )
_lowerCamelCase : Optional[Any] = min(UpperCamelCase__ )
# create the counting array
_lowerCamelCase : Union[str, Any] = coll_max + 1 - coll_min
_lowerCamelCase : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements of the collection are <= i + coll_min
for i in range(1 , UpperCamelCase__ ):
_lowerCamelCase : str = counting_arr[i] + counting_arr[i - 1]
# create the output collection
_lowerCamelCase : Any = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
_lowerCamelCase : List[str] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def _snake_case ( lowercase__ ):
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 362 |
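# A standalone worked trace of the prefix-sum step used by counting_sort above.
# For the collection [4, 1, 3, 1] the raw counts over the values 1..4 are
# [2, 0, 1, 1]; after the running sum they become [2, 2, 3, 4], i.e. the final
# 1-based position of the last copy of each value in the sorted output.
counts = [2, 0, 1, 1]
for idx in range(1, len(counts)):
    counts[idx] += counts[idx - 1]
assert counts == [2, 2, 3, 4]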
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase ):
return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )[0]
def A_ ( self , lowercase ):
return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) | 12 | 0 |
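# The tool above routes text through encode (tokenize), forward (generate) and
# decode (detokenize). A rough equivalent with the plain transformers pipeline
# API -- kept commented out since it downloads the checkpoint named above:
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="philschmid/bart-large-cnn-samsum")
#   print(summarizer("Long English text to summarize ...")[0]["summary_text"])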
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = UnCLIPImageVariationPipeline
snake_case = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
snake_case = IMAGE_VARIATION_BATCH_PARAMS
snake_case = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
snake_case = False
@property
def _snake_case ( self )->Any:
'''simple docstring'''
return 32
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
return 32
@property
def _snake_case ( self )->Any:
'''simple docstring'''
return self.time_input_dim
@property
def _snake_case ( self )->List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _snake_case ( self )->List[str]:
'''simple docstring'''
return 100
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(_SCREAMING_SNAKE_CASE )
@property
def _snake_case ( self )->str:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
A_ : Tuple = UnCLIPTextProjModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self )->List[str]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[str] = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
A_ : int = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self )->Any:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _snake_case ( self )->int:
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _snake_case ( self )->int:
'''simple docstring'''
torch.manual_seed(1 )
A_ : Dict = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : List[str] = self.dummy_decoder
A_ : List[str] = self.dummy_text_proj
A_ : Optional[Any] = self.dummy_text_encoder
A_ : Any = self.dummy_tokenizer
A_ : Optional[Any] = self.dummy_super_res_first
A_ : Union[str, Any] = self.dummy_super_res_last
A_ : str = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
A_ : Optional[Any] = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
A_ : Optional[int] = CLIPImageProcessor(crop_size=32 , size=32 )
A_ : Optional[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=True )->Optional[Any]:
'''simple docstring'''
A_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
if pil_image:
A_ : str = input_image * 0.5 + 0.5
A_ : int = input_image.clamp(0 , 1 )
A_ : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A_ : Any = DiffusionPipeline.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : int = '''cpu'''
A_ : int = self.get_dummy_components()
A_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = pipe(**_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = output.images
A_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Tuple = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
A_ : Optional[int] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Any = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : int = '''cpu'''
A_ : Optional[int] = self.get_dummy_components()
A_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Dict = pipe(**_SCREAMING_SNAKE_CASE )
A_ : List[Any] = output.images
A_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Tuple = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
A_ : Any = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Dict = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = '''cpu'''
A_ : Any = self.get_dummy_components()
A_ : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
A_ : Any = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
A_ : str = pipe(**_SCREAMING_SNAKE_CASE )
A_ : str = output.images
A_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : Dict = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
A_ : List[str] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
A_ : Tuple = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Dict = torch.device('''cpu''' )
class _lowerCamelCase :
"""simple docstring"""
snake_case = 1
A_ : List[Any] = self.get_dummy_components()
A_ : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
A_ : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ : Union[str, Any] = pipe.decoder.dtype
A_ : int = 1
A_ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
A_ : int = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
A_ : Optional[int] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
A_ : Optional[int] = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
A_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
A_ : List[str] = pipe(
**_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE ).images
A_ : Dict = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
A_ : str = pipeline_inputs.pop('''image''' )
A_ : List[str] = pipe.image_encoder(_SCREAMING_SNAKE_CASE ).image_embeds
A_ : List[Any] = pipe(
**_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE , image_embeddings=_SCREAMING_SNAKE_CASE , ).images
        # make sure passing image embeddings manually is identical
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Optional[Any] = torch_device == '''cpu'''
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
A_ : Union[str, Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=_SCREAMING_SNAKE_CASE )
@skip_mps
def _snake_case ( self )->str:
'''simple docstring'''
A_ : str = torch_device == '''cpu'''
A_ : Tuple = True
A_ : int = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
A_ : Optional[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE )
@skip_mps
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _snake_case ( self )->Dict:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
A_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
A_ : Optional[Any] = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
A_ : Tuple = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A_ : Dict = pipeline(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
A_ : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 15 )
| 186 |
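# The tests above depend on seeded torch.Generator objects for reproducibility;
# a minimal standalone illustration of that pattern (stock PyTorch only):
import torch

generator_a = torch.Generator(device="cpu").manual_seed(0)
generator_b = torch.Generator(device="cpu").manual_seed(0)
# identical seeds yield identical draws, which is what lets the tests compare
# image slices against hard-coded expected values
assert torch.equal(torch.randn(2, 3, generator=generator_a), torch.randn(2, 3, generator=generator_b))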
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = ["pixel_values"]
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 255 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , )->None:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
A_ : Tuple = size if size is not None else {'''shortest_edge''': 224}
A_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : Tuple = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A_ : str = do_resize
A_ : Tuple = size
A_ : Optional[Any] = resample
A_ : Tuple = do_center_crop
A_ : List[Any] = crop_size
A_ : Optional[int] = do_rescale
A_ : Tuple = rescale_factor
A_ : Any = do_normalize
A_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Any = do_convert_rgb
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
A_ : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
A_ : Any = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
A_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->int:
'''simple docstring'''
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->np.ndarray:
'''simple docstring'''
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , )->PIL.Image.Image:
'''simple docstring'''
A_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
A_ : int = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''size''' , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = resample if resample is not None else self.resample
A_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : List[str] = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='''crop_size''' , default_to_square=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : int = do_normalize if do_normalize is not None else self.do_normalize
A_ : Tuple = image_mean if image_mean is not None else self.image_mean
A_ : Tuple = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : Optional[int] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : List[str] = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
A_ : int = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A_ : Tuple = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
A_ : Union[str, Any] = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A_ : Tuple = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A_ : List[Any] = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
A_ : str = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
A_ : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
| 186 | 1 |
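# A standalone numpy sketch of the rescale -> normalize portion of the
# preprocessing above (resize and center-crop omitted). The mean/std values
# mirror the usual OPENAI_CLIP_MEAN / OPENAI_CLIP_STD constants.
import numpy as np

pixels = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
rescaled = pixels * (1 / 255)  # do_rescale with rescale_factor = 1/255
mean = np.array([0.48145466, 0.4578275, 0.40821073])
std = np.array([0.26862954, 0.26130258, 0.27577711])
normalized = (rescaled - mean) / std  # do_normalize, broadcast over H x W
assert normalized.shape == (224, 224, 3)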
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    '''simple docstring'''
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""")
    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""")
def analyze_text(text: str) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case: count as if a space preceded the string.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    '''simple docstring'''
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 216 |
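# A quick numeric check of the Shannon entropy formula used above,
# H = -sum(p * log2(p)): a uniform two-symbol distribution carries exactly
# one bit of information.
import math

probs = [0.5, 0.5]
entropy = -sum(p * math.log2(p) for p in probs)
assert abs(entropy - 1.0) < 1e-12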
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('''\n'''.join(upper_files) + '''\n''')
space_files = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('''\n'''.join(space_files) + '''\n''')
hyphen_files = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('''\n'''.join(hyphen_files) + '''\n''')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('''\n'''.join(nodir_files) + '''\n''')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 216 | 1 |
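# The same four filename checks, demonstrated on a made-up file list (the
# paths here are hypothetical examples, not real repository files); "/" is
# used in place of os.sep, so this assumes a POSIX layout.
sample = ["maths/gcd.py", "Maths/GCD.py", "maths/extended gcd.py", "top-level.py"]
assert [f for f in sample if f != f.lower()] == ["Maths/GCD.py"]
assert [f for f in sample if " " in f] == ["maths/extended gcd.py"]
assert [f for f in sample if "-" in f] == ["top-level.py"]
assert [f for f in sample if "/" not in f] == ["top-level.py"]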
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f'''{dutch_national_flag_sort(unsorted)}''') | 341 |
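# A quick usage check of the three-way partition above (it relies on the
# dutch_national_flag_sort function and colors tuple defined in this file):
# 0s are swapped toward the front, 2s toward the back, 1s stay in the middle,
# all in a single in-place pass.
example = [2, 0, 1, 0, 2, 1]
assert dutch_national_flag_sort(example) == [0, 0, 1, 1, 2, 2]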
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''') | 341 | 1 |
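# The loop above appears to enumerate "almost equilateral" triangles with
# integer sides (a, a, a +/- 1) and integral area -- Project Euler problem 94
# (an inference; the original docstring was stripped) -- and sums their
# perimeters. A brute-force cross-check of the first solutions via Heron's
# formula, kept in integer arithmetic (16 * area^2):
import math

def has_integral_area(a: int, b: int, c: int) -> bool:
    sixteen_area_sq = (a + b + c) * (-a + b + c) * (a - b + c) * (a + b - c)
    if sixteen_area_sq <= 0 or sixteen_area_sq % 16:
        return False
    root = math.isqrt(sixteen_area_sq // 16)
    return root * root == sixteen_area_sq // 16

found = [(a, a, a + d) for a in range(2, 400) for d in (-1, 1) if has_integral_area(a, a, a + d)]
assert (5, 5, 6) in found and (17, 17, 16) in found  # perimeters 16 and 50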
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    '''simple docstring'''
    def process(self, sample: float) -> float:
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("""Gain (dB)""")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("""Frequency (Hz)""")
    plt.xscale("""log""")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("""Phase shift (Radians)""")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
| 341 |
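# A minimal filter satisfying the FilterType protocol above -- a 3-tap moving
# average (FIR) -- so the response plots can be exercised. This class is an
# illustrative addition, not part of the original module.
class MovingAverageFilter:
    def __init__(self, taps: int = 3) -> None:
        self.history = [0.0] * taps

    def process(self, sample: float) -> float:
        # shift in the new sample and return the windowed mean
        self.history = self.history[1:] + [float(sample)]
        return sum(self.history) / len(self.history)

# e.g. show_frequency_response(MovingAverageFilter(), 48000)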
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
| 341 | 1 |
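# The copy() method above deep-copies every field into a fresh instance. The
# same pattern on a toy dataclass (ToyConfig is an illustrative name, not part
# of the original file):
import copy
from dataclasses import dataclass, field

@dataclass
class ToyConfig:
    retries: int = 1
    headers: dict = field(default_factory=dict)

    def copy(self) -> "ToyConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

base = ToyConfig(headers={"accept": "json"})
clone = base.copy()
clone.headers["accept"] = "text"
assert base.headers["accept"] == "json"  # mutable state is not shared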
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a__ ( a_ ):
_SCREAMING_SNAKE_CASE : int = "deit"
def __init__( self , _UpperCamelCase=768 , _UpperCamelCase=12 , _UpperCamelCase=12 , _UpperCamelCase=3072 , _UpperCamelCase="gelu" , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=0.0_2 , _UpperCamelCase=1E-1_2 , _UpperCamelCase=224 , _UpperCamelCase=16 , _UpperCamelCase=3 , _UpperCamelCase=True , _UpperCamelCase=16 , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_UpperCamelCase )
_lowercase : str = hidden_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Any = num_attention_heads
_lowercase : str = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : List[Any] = initializer_range
_lowercase : str = layer_norm_eps
_lowercase : Tuple = image_size
_lowercase : Any = patch_size
_lowercase : Dict = num_channels
_lowercase : List[str] = qkv_bias
_lowercase : str = encoder_stride
class a__ ( a_ ):
_SCREAMING_SNAKE_CASE : List[str] = version.parse('1.11' )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return 1E-4
| 250 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 242 | 0 |
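# A standalone sanity check of the win-counting idea above on a tiny instance:
# one 4-sided "Peter" die against one 6-sided "Colin" die. Counting pairs
# directly, Peter wins when his roll is strictly higher, giving
# 0 + 1 + 2 + 3 = 6 wins out of 4 * 6 = 24 games.
wins = sum(1 for p in range(1, 5) for c in range(1, 7) if p > c)
assert wins == 6 and wins / 24 == 0.25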
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :int = nn.Linear(3 , 4 )
lowerCAmelCase__ :str = nn.BatchNormad(4 )
lowerCAmelCase__ :Any = nn.Linear(4 , 5 )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(__UpperCAmelCase ) ) )
class _lowerCAmelCase ( a ):
"""simple docstring"""
def snake_case ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class _lowerCAmelCase ( a ):
"""simple docstring"""
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return output + 1
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = ModelForTest()
lowerCAmelCase__ :int = ModelHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(test_model._hf_hook , __UpperCAmelCase )
self.assertTrue(hasattr(__UpperCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__UpperCAmelCase )
self.assertFalse(hasattr(__UpperCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(__UpperCAmelCase , '_old_forward' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = ModelForTest()
lowerCAmelCase__ :List[str] = ModelHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase , append=__UpperCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook , __UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__UpperCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(__UpperCAmelCase )
self.assertFalse(hasattr(__UpperCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(__UpperCAmelCase , '_old_forward' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = ModelForTest()
lowerCAmelCase__ :Optional[int] = torch.randn(2 , 3 )
lowerCAmelCase__ :int = test_model(x + 1 )
lowerCAmelCase__ :int = test_model(x + 2 )
lowerCAmelCase__ :Optional[Any] = PreForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces it; it does not chain
lowerCAmelCase__ :Any = PreForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ :int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = test_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = ModelForTest()
lowerCAmelCase__ :int = torch.randn(2 , 3 )
lowerCAmelCase__ :int = test_model(__UpperCAmelCase )
lowerCAmelCase__ :Any = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Any = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces it; it does not chain
lowerCAmelCase__ :Optional[Any] = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Any = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ :Dict = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = test_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , output + 2 , atol=1E-5 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = ModelForTest()
lowerCAmelCase__ :List[str] = torch.randn(2 , 3 )
lowerCAmelCase__ :Tuple = test_model(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCAmelCase__ :Dict = True
lowerCAmelCase__ :Union[str, Any] = test_model(__UpperCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCAmelCase__ :Dict = torch.randn(2 , 3 )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCAmelCase , AlignDevicesHook(io_same_device=__UpperCAmelCase ) )
lowerCAmelCase__ :List[str] = torch.randn(2 , 3 ).to(0 )
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCAmelCase__ :List[str] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ :Union[str, Any] = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ :Any = torch.randn(2 , 3 )
lowerCAmelCase__ :List[Any] = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
lowerCAmelCase__ :int = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCAmelCase__ :List[str] = torch.randn(2 , 3 )
lowerCAmelCase__ :Any = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCAmelCase__ :List[Any] = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ :List[Any] = torch.device(__UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ :str = torch.randn(2 , 3 )
lowerCAmelCase__ :Optional[int] = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , offload_buffers=__UpperCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCAmelCase__ :Any = torch.randn(2 , 3 )
lowerCAmelCase__ :str = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
lowerCAmelCase__ :Optional[int] = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ :Any = torch.device(__UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = torch.randn(2 , 3 )
lowerCAmelCase__ :Union[str, Any] = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , weights_map=model.state_dict() , offload_buffers=__UpperCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
lowerCAmelCase__ :Tuple = torch.randn(2 , 3 )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 354 |
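# The ModelHook / add_hook_to_module machinery exercised above is specific to
# accelerate, but the core idea maps onto PyTorch's built-in hook API. A
# minimal sketch with register_forward_pre_hook (stock torch):
import torch
import torch.nn as nn

layer = nn.Linear(3, 3)
# like PreForwardHook above: rewrite the positional args before forward runs
handle = layer.register_forward_pre_hook(lambda module, args: (args[0] + 1,))
hooked_out = layer(torch.zeros(2, 3))
handle.remove()
assert torch.allclose(hooked_out, layer(torch.ones(2, 3)), atol=1e-6)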
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 254 | 0 |
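# A usage check of the Kosaraju implementation above on the first sample
# graph: the cycle 0 -> 2 -> 1 -> 0 collapses into one component, while 3
# and 4 remain singletons.
components = strongly_connected_components(test_graph_1)
assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]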
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 184 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A : Optional[List[str]] = None
A : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A : str = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class _lowercase :
"""simple docstring"""
A__ = True
A__ = None
# Automatically constructed
A__ = "PIL.Image.Image"
A__ = pa.struct({"bytes": pa.binary(), "path": pa.string()})
A__ = field(default="Image" , init=lowercase__ , repr=lowercase__)
def __call__( self : Any ):
'''simple docstring'''
return self.pa_type
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : str = np.array(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"path": value, "bytes": None}
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
return {"path": None, "bytes": value}
elif isinstance(__lowerCamelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__lowerCamelCase )
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__lowerCamelCase )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowerCAmelCase ( self : Any , __lowerCamelCase : dict , __lowerCamelCase : List[Any]=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = PIL.Image.open(__lowerCamelCase )
else:
lowerCamelCase__ : Tuple = path.split("::" )[-1]
try:
lowerCamelCase__ : str = string_to_dict(__lowerCamelCase , config.HUB_DATASETS_URL )["repo_id"]
lowerCamelCase__ : Any = token_per_repo_id.get(__lowerCamelCase )
except ValueError:
lowerCamelCase__ : int = None
with xopen(__lowerCamelCase , "rb" , use_auth_token=__lowerCamelCase ) as f:
lowerCamelCase__ : List[str] = BytesIO(f.read() )
lowerCamelCase__ : Optional[int] = PIL.Image.open(bytes_ )
else:
lowerCamelCase__ : Dict = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array of paths, bytes, structs or pixel lists to the image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files referenced by path into the "bytes" column."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats():
    """List the image formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, keeping its format when Pillow supports it."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image, preferring its on-disk path when it has one."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype != dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            # only bind dest_dtype on success so the None check below stays meaningful
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> list:
    """Encode a list of heterogeneous image objects into a list of {"bytes", "path"} dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
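# Usage sketch (an illustration, not part of the original module): encode a
# numpy array into the {"bytes", "path"} struct this feature stores, then
# decode it back into a PIL image. Assumes Pillow is installed and the
# module-level helpers above are importable.
#
#     import numpy as np
#     feature = Image()
#     encoded = feature.encode_example(np.zeros((16, 16, 3), dtype="uint8"))
#     pil_image = feature.decode_example(encoded)  # PIL.Image.Image, 16x16 RGB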
| 184 | 1 |
"""Table Transformer model configuration."""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Table Transformer model."""

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
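# Usage sketch (illustrative, not part of the original file): the default
# config exposes `hidden_size` and `num_attention_heads` through the
# DETR-style names wired up in `attribute_map`.
#
#     config = TableTransformerConfig()
#     assert config.hidden_size == 256          # aliased to `d_model`
#     assert config.num_attention_heads == 8    # aliased to `encoder_attention_heads`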
| 334 |
"""Flow network with a push-relabel (relabel-to-front) maximum flow algorithm."""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses should override this
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
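    # For this sample graph the only path from source 0 to sink 3 is
    # 0 -> 1 -> 2 -> 3, so the expected output is min(7, 6, 8) = 6.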
| 334 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """tqdm wrapper that, by default, only renders a progress bar on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except local process 0
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
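# Usage sketch (an assumption about the calling convention, since
# `main_process_only` is the first positional parameter): pass the flag
# first, then the usual tqdm arguments.
#
#     for step in tqdm(True, range(100)):
#         ...  # the bar renders only on local process 0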
| 330 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
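# Note: `_LazyModule` defers importing the torch-backed module until an
# attribute is first accessed, so the heavy import only happens on use
# (a sketch of the intended access pattern):
#
#     from transformers.models.timm_backbone import TimmBackboneConfig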
| 330 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 86 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
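# Worked examples (illustrative, not part of the original module):
# a principal of 1_000 at a 1% daily rate over 30 days accrues
# 1_000 * 0.01 * 30 = 300.0 in simple interest:
#     simple_interest(1_000, 0.01, 30)  # -> 300.0
# while 10_000 compounded at 5% per period for 3 periods earns
# 10_000 * (1.05 ** 3 - 1) = 1_576.25:
#     compound_interest(10_000, 0.05, 3)  # -> 1576.25 (up to float rounding)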
| 86 | 1 |