code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float = 1E-12 , _SCREAMING_SNAKE_CASE : int = 100 , ):
'''simple docstring'''
assert np.shape(_SCREAMING_SNAKE_CASE )[0] == np.shape(_SCREAMING_SNAKE_CASE )[1]
# Ensure proper dimensionality.
assert np.shape(_SCREAMING_SNAKE_CASE )[0] == np.shape(_SCREAMING_SNAKE_CASE )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_SCREAMING_SNAKE_CASE ) == np.iscomplexobj(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.iscomplexobj(_SCREAMING_SNAKE_CASE )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_SCREAMING_SNAKE_CASE , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_UpperCAmelCase = False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
_UpperCAmelCase = np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Normalize the resulting output vector.
_UpperCAmelCase = w / np.linalg.norm(_SCREAMING_SNAKE_CASE )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_UpperCAmelCase = vector.conj().T if is_complex else vector.T
_UpperCAmelCase = np.dot(_SCREAMING_SNAKE_CASE , np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Check convergence.
_UpperCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_UpperCAmelCase = True
_UpperCAmelCase = lambda_
if is_complex:
_UpperCAmelCase = np.real(lambda_ )
return lambda_, vector
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_UpperCAmelCase = np.array([41, 4, 20] )
_UpperCAmelCase = real_input_matrix.astype(np.complexaaa )
_UpperCAmelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCAmelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCAmelCase = real_input_matrix
_UpperCAmelCase = real_vector
elif problem_type == "complex":
_UpperCAmelCase = complex_input_matrix
_UpperCAmelCase = complex_vector
# Our implementation.
_UpperCAmelCase , _UpperCAmelCase = power_iteration(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_UpperCAmelCase , _UpperCAmelCase = np.linalg.eigh(_SCREAMING_SNAKE_CASE )
# Last eigenvalue is the maximum one.
_UpperCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_UpperCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_SCREAMING_SNAKE_CASE ) - np.abs(_SCREAMING_SNAKE_CASE ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : List[str] = get_tests_dir("fixtures")
__A : Optional[Any] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__A : Union[str, Any] = get_tests_dir("fixtures/dummy-config.json")
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] )->Dict:
_UpperCAmelCase = 0
def lowercase__ ( self : Optional[int] )->Optional[int]:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[str] )->Dict:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Dict )->Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop('''feature_extractor_type''' )
_UpperCAmelCase = WavaVecaFeatureExtractor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : str )->str:
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple )->Any:
with self.assertRaisesRegex(
__UpperCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def lowercase__ ( self : Dict )->Any:
with self.assertRaisesRegex(
__UpperCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def lowercase__ ( self : Tuple )->List[Any]:
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase__ ( self : Optional[Any] )->int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Any )->str:
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = True
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 326 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = 1.5
_UpperCAmelCase = int(factor * num_class_images )
_UpperCAmelCase = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=_SCREAMING_SNAKE_CASE )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCAmelCase = client.query(text=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4:
break
else:
_UpperCAmelCase = int(factor * num_images )
_UpperCAmelCase = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = tqdm(desc='''downloading real regularization images''' , total=_SCREAMING_SNAKE_CASE )
with open(f'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(f'{class_data_dir}/urls.txt' , '''w''' ) as fa, open(
f'{class_data_dir}/images.txt' , '''w''' ) as fa:
while total < num_class_images:
_UpperCAmelCase = class_images[count]
count += 1
try:
_UpperCAmelCase = requests.get(images['''url'''] )
if img.status_code == 200:
_UpperCAmelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f'{class_data_dir}/images/{total}.jpg' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser('''''' , add_help=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=_SCREAMING_SNAKE_CASE )
return parser.parse_args()
if __name__ == "__main__":
__A : int = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 326 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : Collection[float] | None = None )->None:
if components is None:
_UpperCAmelCase = []
_UpperCAmelCase = list(__UpperCamelCase )
def __len__( self : Optional[int] )->int:
return len(self.__components )
def __str__( self : Any )->str:
return "(" + ",".join(map(__UpperCamelCase , self.__components ) ) + ")"
def __add__( self : Any , __UpperCamelCase : Vector )->Vector:
_UpperCAmelCase = len(self )
if size == len(__UpperCamelCase ):
_UpperCAmelCase = [self.__components[i] + other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
else:
raise Exception('''must have the same size''' )
def __sub__( self : str , __UpperCamelCase : Vector )->Vector:
_UpperCAmelCase = len(self )
if size == len(__UpperCamelCase ):
_UpperCAmelCase = [self.__components[i] - other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self : Optional[int] , __UpperCamelCase : float )->Vector:
...
@overload
def __mul__( self : Dict , __UpperCamelCase : Vector )->float:
...
def __mul__( self : int , __UpperCamelCase : float | Vector )->float | Vector:
if isinstance(__UpperCamelCase , (float, int) ):
_UpperCAmelCase = [c * other for c in self.__components]
return Vector(__UpperCamelCase )
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and len(self ) == len(__UpperCamelCase ):
_UpperCAmelCase = len(self )
_UpperCAmelCase = [self.__components[i] * other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )]
return sum(__UpperCamelCase )
else: # error case
raise Exception('''invalid operand!''' )
def lowercase__ ( self : Union[str, Any] )->Vector:
return Vector(self.__components )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int )->float:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : float )->None:
assert -len(self.__components ) <= pos < len(self.__components )
_UpperCAmelCase = value
def lowercase__ ( self : Optional[Any] )->float:
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
_UpperCAmelCase = [c**2 for c in self.__components]
return math.sqrt(sum(__UpperCamelCase ) )
def lowercase__ ( self : List[str] , __UpperCamelCase : Vector , __UpperCamelCase : bool = False )->float:
_UpperCAmelCase = self * other
_UpperCAmelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return Vector([0] * dimension )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ))
_UpperCAmelCase = [0] * dimension
_UpperCAmelCase = 1
return Vector(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : Vector , _SCREAMING_SNAKE_CASE : Vector ):
'''simple docstring'''
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (isinstance(_SCREAMING_SNAKE_CASE , (int, float) ))
)
return x * scalar + y
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
random.seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )]
return Vector(_SCREAMING_SNAKE_CASE )
class _a :
"""simple docstring"""
def __init__( self : Dict , __UpperCamelCase : list[list[float]] , __UpperCamelCase : int , __UpperCamelCase : int )->None:
_UpperCAmelCase = matrix
_UpperCAmelCase = w
_UpperCAmelCase = h
def __str__( self : Optional[Any] )->str:
_UpperCAmelCase = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : List[Any] , __UpperCamelCase : Matrix )->Matrix:
if self.__width == other.width() and self.__height == other.height():
_UpperCAmelCase = []
for i in range(self.__height ):
_UpperCAmelCase = [
self.__matrix[i][j] + other.component(__UpperCamelCase , __UpperCamelCase )
for j in range(self.__width )
]
matrix.append(__UpperCamelCase )
return Matrix(__UpperCamelCase , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self : Optional[Any] , __UpperCamelCase : Matrix )->Matrix:
if self.__width == other.width() and self.__height == other.height():
_UpperCAmelCase = []
for i in range(self.__height ):
_UpperCAmelCase = [
self.__matrix[i][j] - other.component(__UpperCamelCase , __UpperCamelCase )
for j in range(self.__width )
]
matrix.append(__UpperCamelCase )
return Matrix(__UpperCamelCase , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : List[Any] , __UpperCamelCase : float )->Matrix:
...
@overload
def __mul__( self : Dict , __UpperCamelCase : Vector )->Vector:
...
def __mul__( self : Union[str, Any] , __UpperCamelCase : float | Vector )->Vector | Matrix:
if isinstance(__UpperCamelCase , __UpperCamelCase ): # matrix-vector
if len(__UpperCamelCase ) == self.__width:
_UpperCAmelCase = zero_vector(self.__height )
for i in range(self.__height ):
_UpperCAmelCase = [
self.__matrix[i][j] * other.component(__UpperCamelCase )
for j in range(self.__width )
]
ans.change_component(__UpperCamelCase , sum(__UpperCamelCase ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(__UpperCamelCase , (int, float) ): # matrix-scalar
_UpperCAmelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__UpperCamelCase , self.__width , self.__height )
return None
def lowercase__ ( self : Dict )->int:
return self.__height
def lowercase__ ( self : Dict )->int:
return self.__width
def lowercase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : int )->float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float )->None:
if 0 <= x < self.__height and 0 <= y < self.__width:
_UpperCAmelCase = value
else:
raise Exception('''change_component: indices out of bounds''' )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
_UpperCAmelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__UpperCamelCase , self.__width - 1 , self.__height - 1 ).determinant()
def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : int )->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__UpperCamelCase , __UpperCamelCase )
else:
raise Exception('''Indices out of bounds''' )
def lowercase__ ( self : List[Any] )->float:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_UpperCAmelCase = [
self.__matrix[0][y] * self.cofactor(0 , __UpperCamelCase ) for y in range(self.__width )
]
return sum(__UpperCamelCase )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = [[0] * n for _ in range(_SCREAMING_SNAKE_CASE )]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
random.seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
[random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )
]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while b:
_UpperCAmelCase , _UpperCAmelCase = b, a % b
return a
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(_SCREAMING_SNAKE_CASE , a % b )
def lowercase ( ):
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 326 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _SCREAMING_SNAKE_CASE : int | str ):
'''simple docstring'''
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE )
return n == n[::-1]
def lowercase ( _SCREAMING_SNAKE_CASE : int = 100_0000 ):
'''simple docstring'''
_UpperCAmelCase = 0
for i in range(1 , _SCREAMING_SNAKE_CASE ):
if is_palindrome(_SCREAMING_SNAKE_CASE ) and is_palindrome(bin(_SCREAMING_SNAKE_CASE ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
__A : str = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
_UpperCAmelCase = k.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if k.startswith('''encoder''' ):
_UpperCAmelCase = k.replace('''.attn''' , '''.self_attn''' )
_UpperCAmelCase = k.replace('''norm1''' , '''self_attn_layer_norm''' )
_UpperCAmelCase = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
_UpperCAmelCase = k.replace('''norm1''' , '''self_attn_layer_norm''' )
_UpperCAmelCase = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
_UpperCAmelCase = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
_UpperCAmelCase = sd.pop(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
_UpperCAmelCase = v
__A : Dict = ["START"]
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )
_UpperCAmelCase = model['''model''']
_UpperCAmelCase = BlenderbotConfig.from_json_file(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = BlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = m.model.state_dict().keys()
_UpperCAmelCase = []
_UpperCAmelCase = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_UpperCAmelCase = rename_state_dict_key(_SCREAMING_SNAKE_CASE )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_UpperCAmelCase = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_SCREAMING_SNAKE_CASE )
m.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
m.half()
m.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A : List[Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 326 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = (boundary[1] - boundary[0]) / steps
_UpperCAmelCase = boundary[0]
_UpperCAmelCase = boundary[1]
_UpperCAmelCase = make_points(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 0.0
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
for i in x_i:
# print(i)
y += h * f(_SCREAMING_SNAKE_CASE )
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
return y
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = a + h
while x < (b - h):
yield x
_UpperCAmelCase = x + h
def lowercase ( _SCREAMING_SNAKE_CASE : int ): # enter your function here
'''simple docstring'''
_UpperCAmelCase = (x - 0) * (x - 0)
return y
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = 0.0 # Lower bound of integration
_UpperCAmelCase = 1.0 # Upper bound of integration
_UpperCAmelCase = 10.0 # define number of steps or resolution
_UpperCAmelCase = [a, b] # define boundary of integration
_UpperCAmelCase = method_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f'y = {y}' )
if __name__ == "__main__":
main()
| 326 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[Any] )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Tuple )->List[str]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 326 | 1 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = parquet_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = [parquet_path]
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str]=("train",) ):
'''simple docstring'''
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader({'''train''': parquet_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if split:
_UpperCAmelCase = {split: parquet_path}
else:
_UpperCAmelCase = '''train'''
_UpperCAmelCase = {'''train''': parquet_path, '''test''': parquet_path}
_UpperCAmelCase = tmp_path / '''cache'''
_UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCAmelCase = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = ParquetDatasetWriter(_SCREAMING_SNAKE_CASE , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_UpperCAmelCase = pq.ParquetFile(tmp_path / '''foo.parquet''' )
_UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = str(shared_datadir / '''test_image_rgb.jpg''' )
_UpperCAmelCase = {'''image''': [image_path]}
_UpperCAmelCase = Features({'''image''': Image()} )
_UpperCAmelCase = Dataset.from_dict(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ParquetDatasetWriter(_SCREAMING_SNAKE_CASE , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
_UpperCAmelCase = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
_UpperCAmelCase = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=_SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert get_writer_batch_size(_SCREAMING_SNAKE_CASE ) == expected
| 326 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A : Tuple = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple:
_UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )]
if identifier is not None:
_UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for n_ in n_identifier:
_UpperCAmelCase = [file for file in files if n_ not in file]
else:
_UpperCAmelCase = [file for file in files if n_identifier not in file]
_UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __UpperCamelCase )
if only_modules:
_UpperCAmelCase = file.split('''.''' )[0]
try:
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase )
_UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
_UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''modeling'''
_UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''tokenization'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''configuration'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = Path('''docs/source''' )
_UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Parse args
_UpperCAmelCase , _UpperCAmelCase = parser.parse_known_args()
if not hasattr(_SCREAMING_SNAKE_CASE , '''func''' ):
parser.print_help()
exit(1 )
_UpperCAmelCase = parse_unknown_args(_SCREAMING_SNAKE_CASE )
# Run
_UpperCAmelCase = args.func(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 326 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__A : Optional[int] = TypeVar("T")
class _a ( Generic[T]):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : list[T] , __UpperCamelCase : Callable[[T, T], T] )->None:
_UpperCAmelCase = None
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [any_type for _ in range(self.N )] + arr
_UpperCAmelCase = fnc
self.build()
def lowercase__ ( self : int )->None:
for p in range(self.N - 1 , 0 , -1 ):
_UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : T )->None:
p += self.N
_UpperCAmelCase = v
while p > 1:
_UpperCAmelCase = p // 2
_UpperCAmelCase = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : int )->T | None: # noqa: E741
_UpperCAmelCase , _UpperCAmelCase = l + self.N, r + self.N
_UpperCAmelCase = None
while l <= r:
if l % 2 == 1:
_UpperCAmelCase = self.st[l] if res is None else self.fn(__UpperCamelCase , self.st[l] )
if r % 2 == 0:
_UpperCAmelCase = self.st[r] if res is None else self.fn(__UpperCamelCase , self.st[r] )
_UpperCAmelCase , _UpperCAmelCase = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__A : int = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__A : Any = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__A : Optional[int] = SegmentTree(test_array, min)
__A : Union[str, Any] = SegmentTree(test_array, max)
__A : List[str] = SegmentTree(test_array, lambda a, b: a + b)
def lowercase ( ):
'''simple docstring'''
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = reduce(_SCREAMING_SNAKE_CASE , test_array[i : j + 1] )
_UpperCAmelCase = reduce(_SCREAMING_SNAKE_CASE , test_array[i : j + 1] )
_UpperCAmelCase = reduce(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert max_range == max_segment_tree.query(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert sum_range == sum_segment_tree.query(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
test_all_segments()
for index, value in test_updates.items():
__A : str = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 326 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCAmelCase = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
_UpperCAmelCase = x_num * y_num
_UpperCAmelCase = x_den * y_num + x_num * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = x_num * x_num * y_num * y_num
_UpperCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Tuple = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
__A : Tuple = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
__A : Any = {
"ctrl": 256,
}
__A : Optional[Any] = {
"Pregnancy": 168629,
"Christianity": 7675,
"Explain": 106423,
"Fitness": 63440,
"Saving": 63163,
"Ask": 27171,
"Ass": 95985,
"Joke": 163509,
"Questions": 45622,
"Thoughts": 49605,
"Retail": 52342,
"Feminism": 164338,
"Writing": 11992,
"Atheism": 192263,
"Netflix": 48616,
"Computing": 39639,
"Opinion": 43213,
"Alone": 44967,
"Funny": 58917,
"Gaming": 40358,
"Human": 4088,
"India": 1331,
"Joker": 77138,
"Diet": 36206,
"Legal": 11859,
"Norman": 4939,
"Tip": 72689,
"Weight": 52343,
"Movies": 46273,
"Running": 23425,
"Science": 2090,
"Horror": 37793,
"Confession": 60572,
"Finance": 12250,
"Politics": 16360,
"Scary": 191985,
"Support": 12654,
"Technologies": 32516,
"Teenage": 66160,
"Event": 32769,
"Learned": 67460,
"Notion": 182770,
"Wikipedia": 37583,
"Books": 6665,
"Extract": 76050,
"Confessions": 102701,
"Conspiracy": 75932,
"Links": 63674,
"Narcissus": 150425,
"Relationship": 54766,
"Relationships": 134796,
"Reviews": 41671,
"News": 4256,
"Translation": 26820,
"multilingual": 128406,
}
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(_SCREAMING_SNAKE_CASE )
return pairs
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = CONTROL_CODES
def __init__( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple="<unk>" , **__UpperCamelCase : str )->Tuple:
super().__init__(unk_token=__UpperCamelCase , **__UpperCamelCase )
with open(__UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = {}
@property
def lowercase__ ( self : Union[str, Any] )->int:
return len(self.encoder )
def lowercase__ ( self : Optional[int] )->Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] )->Optional[int]:
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_UpperCAmelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__UpperCamelCase )
_UpperCAmelCase = '''@@ '''.join(__UpperCamelCase )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
return word
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] )->int:
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(r'''\S+\n?''' , __UpperCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Any )->Optional[int]:
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] )->str:
return self.decoder.get(__UpperCamelCase , self.unk_token )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Tuple )->Optional[int]:
_UpperCAmelCase = ''' '''.join(__UpperCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None )->Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + '''\n''' )
_UpperCAmelCase = 0
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
_UpperCAmelCase = token_index
writer.write(''' '''.join(__UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 326 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''MLukeTokenizer'''
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['''entity_predictions.bias''']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' )
_UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''Tokyo is the capital of <mask>.'''
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = encoding['''input_ids'''][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = f'{language}:{entity_name}'
_UpperCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase__ = Features({"""audio""": Audio()})
UpperCamelCase__ = Features({"""labels""": ClassLabel})
UpperCamelCase__ = "audio"
UpperCamelCase__ = "labels"
def lowercase__ ( self : Tuple , __UpperCamelCase : Dict )->Union[str, Any]:
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
_UpperCAmelCase = copy.deepcopy(self )
_UpperCAmelCase = self.label_schema.copy()
_UpperCAmelCase = features[self.label_column]
_UpperCAmelCase = label_schema
return task_template
@property
def lowercase__ ( self : str )->Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 326 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : Tuple = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
_UpperCAmelCase = True
while ask_again:
_UpperCAmelCase = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ):
'''simple docstring'''
_UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """bert-generation"""
def __init__( self : Tuple , __UpperCamelCase : Dict=5_0_3_5_8 , __UpperCamelCase : Optional[int]=1_0_2_4 , __UpperCamelCase : List[Any]=2_4 , __UpperCamelCase : Dict=1_6 , __UpperCamelCase : Tuple=4_0_9_6 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Any=5_1_2 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : List[str]=1e-12 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Tuple=1 , __UpperCamelCase : List[Any]="absolute" , __UpperCamelCase : Tuple=True , **__UpperCamelCase : Union[str, Any] , )->Any:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
| 326 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
__A : Union[str, Any] = load("accuracy")
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None:
super().__init__()
_UpperCAmelCase = trainer
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(__UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE : Any ):
_UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
_UpperCAmelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print('''Training...''' )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 326 | 1 |
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
__A : int = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__A : List[str] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__A : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a ( datasets.Metric):
"""simple docstring"""
def lowercase__ ( self : int )->Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=None )->Dict:
return {
"matthews_correlation": float(matthews_corrcoef(__UpperCamelCase , __UpperCamelCase , sample_weight=__UpperCamelCase ) ),
}
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 326 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any]=1_3 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : str=True , __UpperCamelCase : Any=True , __UpperCamelCase : int=True , __UpperCamelCase : Any=True , __UpperCamelCase : Any=True , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : Union[str, Any]=9_9 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : List[Any]=3_2 , __UpperCamelCase : Any=5 , __UpperCamelCase : int=4 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : List[str]=5_1_2 , __UpperCamelCase : Optional[Any]=1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : Any=3 , __UpperCamelCase : List[Any]=4 , __UpperCamelCase : str="last" , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , )->List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_lengths
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_special
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = summary_type
_UpperCAmelCase = use_proj
_UpperCAmelCase = scope
def lowercase__ ( self : Tuple )->str:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_input_lengths:
_UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] )->List[Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , )->Optional[int]:
_UpperCAmelCase = FlaubertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , lengths=__UpperCamelCase , langs=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , langs=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : str , )->List[str]:
_UpperCAmelCase = FlaubertWithLMHeadModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , )->Optional[Any]:
_UpperCAmelCase = FlaubertForQuestionAnsweringSimple(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , )->str:
_UpperCAmelCase = FlaubertForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , p_mask=__UpperCamelCase , )
_UpperCAmelCase = model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , )
((_UpperCAmelCase) , ) = result_with_labels.to_tuple()
_UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )
((_UpperCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , )->Dict:
_UpperCAmelCase = FlaubertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , )->int:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = FlaubertForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , )->List[Any]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = FlaubertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Any )->Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict=False )->str:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def lowercase__ ( self : Optional[Any] )->List[str]:
_UpperCAmelCase = FlaubertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , emb_dim=3_7 )
def lowercase__ ( self : List[Any] )->Any:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCamelCase )
def lowercase__ ( self : Optional[int] )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCamelCase )
def lowercase__ ( self : Tuple )->int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCamelCase )
def lowercase__ ( self : Optional[int] )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCamelCase )
@slow
def lowercase__ ( self : Optional[int] )->List[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = FlaubertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
@require_torch_gpu
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCAmelCase = True
_UpperCAmelCase = model_class(config=__UpperCamelCase )
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = torch.jit.trace(
__UpperCamelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCamelCase , os.path.join(__UpperCamelCase , '''traced_model.pt''' ) )
_UpperCAmelCase = torch.jit.load(os.path.join(__UpperCamelCase , '''traced_model.pt''' ) , map_location=__UpperCamelCase )
loaded(inputs_dict['''input_ids'''].to(__UpperCamelCase ) , inputs_dict['''attention_mask'''].to(__UpperCamelCase ) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : Tuple )->int:
_UpperCAmelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCAmelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
| 326 |
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _SCREAMING_SNAKE_CASE : list[float] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(_SCREAMING_SNAKE_CASE ):
print(f'{i}\t\t{d}' )
def lowercase ( _SCREAMING_SNAKE_CASE : list[dict[str, int]] , _SCREAMING_SNAKE_CASE : list[float] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def lowercase ( _SCREAMING_SNAKE_CASE : list[dict[str, int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = [float('''inf''' )] * vertex_count
_UpperCAmelCase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
_UpperCAmelCase = distance[u] + w
_UpperCAmelCase = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = int(input("Enter number of vertices: ").strip())
__A : List[Any] = int(input("Enter number of edges: ").strip())
__A : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__A , __A , __A : List[Any] = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__A : List[Any] = {"src": src, "dst": dest, "weight": weight}
__A : List[Any] = int(input("\nEnter shortest path source:").strip())
__A : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
# We need to create solution object to save path.
_UpperCAmelCase = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = run_maze(_SCREAMING_SNAKE_CASE , 0 , 0 , _SCREAMING_SNAKE_CASE )
if solved:
print('''\n'''.join(str(_SCREAMING_SNAKE_CASE ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[list[int]] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
# Final check point.
if i == j == (size - 1):
_UpperCAmelCase = 1
return True
_UpperCAmelCase = (not i < 0) and (not j < 0) # Check lower bounds
_UpperCAmelCase = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_UpperCAmelCase = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_UpperCAmelCase = 1
# check for directions
if (
run_maze(_SCREAMING_SNAKE_CASE , i + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j + 1 , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j - 1 , _SCREAMING_SNAKE_CASE )
):
return True
_UpperCAmelCase = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
__A : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
__A : str = frozenset([])
__A : List[str] = frozenset(["image"])
__A : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[int] = frozenset(["image"])
__A : Optional[int] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"])
__A : str = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[str] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : List[Any] = frozenset(["image", "mask_image"])
__A : List[str] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Tuple = frozenset(["example_image", "image", "mask_image"])
__A : Dict = frozenset(["class_labels"])
__A : str = frozenset(["class_labels"])
__A : str = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : str = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Any = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : List[str] = frozenset(["prompt", "negative_prompt"])
__A : Tuple = frozenset(["input_tokens"])
__A : Optional[int] = frozenset(["input_tokens"])
| 326 | 1 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : bool = True , _SCREAMING_SNAKE_CASE : float = math.inf , _SCREAMING_SNAKE_CASE : float = -math.inf , _SCREAMING_SNAKE_CASE : float = math.inf , _SCREAMING_SNAKE_CASE : float = -math.inf , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : float = 100 , _SCREAMING_SNAKE_CASE : float = 0.01 , _SCREAMING_SNAKE_CASE : float = 1 , ):
'''simple docstring'''
_UpperCAmelCase = False
_UpperCAmelCase = search_prob
_UpperCAmelCase = start_temperate
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = None
while not search_end:
_UpperCAmelCase = current_state.score()
if best_state is None or current_score > best_state.score():
_UpperCAmelCase = current_state
scores.append(_SCREAMING_SNAKE_CASE )
iterations += 1
_UpperCAmelCase = None
_UpperCAmelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_UpperCAmelCase = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor
_UpperCAmelCase = neighbors.pop(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_UpperCAmelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_UpperCAmelCase = picked_neighbor
else:
_UpperCAmelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_UpperCAmelCase = picked_neighbor
_UpperCAmelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_UpperCAmelCase = True
else:
_UpperCAmelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__A : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__A : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__A : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__A : Dict = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
__A : Optional[int] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__A : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'''{local_min.score()}'''
)
__A : Optional[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__A : List[str] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'''{local_min.score()}'''
)
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""image_processor""", """tokenizer"""]
UpperCamelCase__ = """FlavaImageProcessor"""
UpperCamelCase__ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Union[str, Any] , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : str )->List[Any]:
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop('''feature_extractor''' )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
def __call__( self : List[Any] , __UpperCamelCase : Optional[ImageInput] = None , __UpperCamelCase : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : Union[str, Any] , )->List[Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCAmelCase = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if images is not None:
_UpperCAmelCase = self.image_processor(
__UpperCamelCase , return_image_mask=__UpperCamelCase , return_codebook_pixels=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
if text is not None and images is not None:
encoding.update(__UpperCamelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def lowercase__ ( self : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[int] )->Any:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict )->Optional[Any]:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowercase__ ( self : Any )->Any:
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : Dict )->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCamelCase , )
return self.image_processor_class
@property
def lowercase__ ( self : Any )->Tuple:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCamelCase , )
return self.image_processor
| 326 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Tuple = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A : List[str] = spec.loader.load_module()
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A : List[str] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
_UpperCAmelCase = [True] * (num + 1)
_UpperCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[Any] = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
return len(_SCREAMING_SNAKE_CASE ) == 4 and all(0 <= int(_SCREAMING_SNAKE_CASE ) <= 254 for octet in octets )
if __name__ == "__main__":
__A : Union[str, Any] = input().strip()
__A : str = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 326 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 1 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , 0 , -1 ):
_UpperCAmelCase = False
for j in range(_SCREAMING_SNAKE_CASE , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j - 1], unsorted[j]
_UpperCAmelCase = True
for j in range(_SCREAMING_SNAKE_CASE ):
if unsorted[j] > unsorted[j + 1]:
_UpperCAmelCase , _UpperCAmelCase = unsorted[j + 1], unsorted[j]
_UpperCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[int] = input("Enter numbers separated by a comma:\n").strip()
__A : str = [int(item) for item in user_input.split(",")]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 326 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
_UpperCAmelCase = ''''''
while len(_SCREAMING_SNAKE_CASE ) % 3 != 0:
_UpperCAmelCase = '''0''' + bin_string
_UpperCAmelCase = [
bin_string[index : index + 3]
for index in range(len(_SCREAMING_SNAKE_CASE ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_UpperCAmelCase = 0
for index, val in enumerate(_SCREAMING_SNAKE_CASE ):
oct_val += int(2 ** (2 - index) * int(_SCREAMING_SNAKE_CASE ) )
oct_string += str(_SCREAMING_SNAKE_CASE )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 326 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
_UpperCAmelCase = 4
_UpperCAmelCase = (1 << p) - 1
for _ in range(p - 2 ):
_UpperCAmelCase = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
return [ord(_SCREAMING_SNAKE_CASE ) - 96 for elem in plain]
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , _SCREAMING_SNAKE_CASE )
print('''Decoded:''' , decode(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
| 326 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[Any] )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Tuple )->List[str]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 326 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : bytes ):
'''simple docstring'''
return "".join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(_SCREAMING_SNAKE_CASE )] )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if (len(_SCREAMING_SNAKE_CASE ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A : Tuple = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple:
_UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )]
if identifier is not None:
_UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for n_ in n_identifier:
_UpperCAmelCase = [file for file in files if n_ not in file]
else:
_UpperCAmelCase = [file for file in files if n_identifier not in file]
_UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __UpperCamelCase )
if only_modules:
_UpperCAmelCase = file.split('''.''' )[0]
try:
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase )
_UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
_UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''modeling'''
_UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''tokenization'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''configuration'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = Path('''docs/source''' )
_UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 326 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCAmelCase = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
_UpperCAmelCase = x_num * y_num
_UpperCAmelCase = x_den * y_num + x_num * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = x_num * x_num * y_num * y_num
_UpperCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 1 |
"""simple docstring"""
import random
from typing import Any
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
for _ in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
_UpperCAmelCase = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
_UpperCAmelCase , _UpperCAmelCase = data[b], data[a]
return data
if __name__ == "__main__":
__A : Any = [0, 1, 2, 3, 4, 5, 6, 7]
__A : List[Any] = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 326 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''MLukeTokenizer'''
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['''entity_predictions.bias''']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' )
_UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''Tokyo is the capital of <mask>.'''
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = encoding['''input_ids'''][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = f'{language}:{entity_name}'
_UpperCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 1 |
"""simple docstring"""
from math import ceil, sqrt
def lowercase ( _SCREAMING_SNAKE_CASE : int = 100_0000 ):
'''simple docstring'''
_UpperCAmelCase = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_UpperCAmelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_UpperCAmelCase = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : Tuple = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
_UpperCAmelCase = True
while ask_again:
_UpperCAmelCase = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ):
'''simple docstring'''
_UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 | 1 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__A : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase = k.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return k
def lowercase ( _SCREAMING_SNAKE_CASE : dict , _SCREAMING_SNAKE_CASE : dict ):
'''simple docstring'''
_UpperCAmelCase = DEFAULTS.copy()
cfg_kwargs.update(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PegasusConfig(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = PegasusForConditionalGeneration(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch_model.model.state_dict()
_UpperCAmelCase = {}
for k, v in tf_weights.items():
_UpperCAmelCase = rename_state_dict_key(_SCREAMING_SNAKE_CASE )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase = v.T
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
_UpperCAmelCase = mapping['''shared.weight''']
_UpperCAmelCase = mapping['''shared.weight''']
_UpperCAmelCase = {k: torch.zeros_like(_SCREAMING_SNAKE_CASE ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = torch_model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def lowercase ( _SCREAMING_SNAKE_CASE : Any="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_UpperCAmelCase = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {}
_UpperCAmelCase = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_SCREAMING_SNAKE_CASE , desc='''converting tf checkpoint to dict''' ):
_UpperCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = array
return tf_weights
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = Path(_SCREAMING_SNAKE_CASE ).parent.name
_UpperCAmelCase = task_specific_params[f'summarization_{dataset}']['''max_position_embeddings''']
_UpperCAmelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=_SCREAMING_SNAKE_CASE )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_SCREAMING_SNAKE_CASE )
# convert model
_UpperCAmelCase = get_tf_weights_as_numpy(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase = task_specific_params
_UpperCAmelCase = convert_pegasus(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
torch_model.save_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_SCREAMING_SNAKE_CASE , Path(_SCREAMING_SNAKE_CASE ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__A : Optional[int] = parser.parse_args()
if args.save_dir is None:
__A : Dict = Path(args.tf_ckpt_path).parent.name
__A : Tuple = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 326 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
__A : Union[str, Any] = load("accuracy")
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None:
super().__init__()
_UpperCAmelCase = trainer
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(__UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE : Any ):
_UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
_UpperCAmelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print('''Training...''' )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 326 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""image_processor""", """tokenizer"""]
UpperCamelCase__ = """ChineseCLIPImageProcessor"""
UpperCamelCase__ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : Tuple )->Optional[int]:
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop('''feature_extractor''' )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
def __call__( self : str , __UpperCamelCase : List[Any]=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , **__UpperCamelCase : str )->Union[str, Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def lowercase__ ( self : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Dict )->Dict:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : int , *__UpperCamelCase : Any , **__UpperCamelCase : int )->Dict:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowercase__ ( self : Dict )->Union[str, Any]:
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : Union[str, Any] )->List[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCamelCase , )
return self.image_processor_class
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
__A : Any = list[list[float | int]]
def lowercase ( _SCREAMING_SNAKE_CASE : Matrix , _SCREAMING_SNAKE_CASE : Matrix ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for row in range(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = matrix[row][col]
_UpperCAmelCase = vector[row][0]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _SCREAMING_SNAKE_CASE ):
for row in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(_SCREAMING_SNAKE_CASE , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_SCREAMING_SNAKE_CASE )
]
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = [[0] for _ in range(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase = y_val
_UpperCAmelCase = solve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def interpolated_func(_SCREAMING_SNAKE_CASE : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_SCREAMING_SNAKE_CASE ) )
return interpolated_func
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowercase ( _SCREAMING_SNAKE_CASE : Callable[[int], int] = question_function , _SCREAMING_SNAKE_CASE : int = 10 ):
'''simple docstring'''
_UpperCAmelCase = [func(_SCREAMING_SNAKE_CASE ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase = 0
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for poly in polynomials:
_UpperCAmelCase = 1
while func(_SCREAMING_SNAKE_CASE ) == poly(_SCREAMING_SNAKE_CASE ):
x_val += 1
ret += poly(_SCREAMING_SNAKE_CASE )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 |
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : int )->None:
_UpperCAmelCase = size
_UpperCAmelCase = [0] * size
_UpperCAmelCase = [0] * size
@staticmethod
def lowercase__ ( __UpperCamelCase : int )->int:
return index | (index + 1)
@staticmethod
def lowercase__ ( __UpperCamelCase : int )->int:
return (index & (index + 1)) - 1
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int )->None:
_UpperCAmelCase = value
while index < self.size:
_UpperCAmelCase = self.get_prev(__UpperCamelCase ) + 1
if current_left_border == index:
_UpperCAmelCase = value
else:
_UpperCAmelCase = max(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_next(__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int )->int:
right -= 1 # Because of right is exclusive
_UpperCAmelCase = 0
while left <= right:
_UpperCAmelCase = self.get_prev(__UpperCamelCase )
if left <= current_left:
_UpperCAmelCase = max(__UpperCamelCase , self.tree[right] )
_UpperCAmelCase = current_left
else:
_UpperCAmelCase = max(__UpperCamelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
__A : Dict = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__A : str = [{"type": "code", "content": INSTALL_CONTENT}]
__A : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 326 |
"""simple docstring"""
__A : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
__A : str = frozenset([])
__A : List[str] = frozenset(["image"])
__A : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[int] = frozenset(["image"])
__A : Optional[int] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"])
__A : str = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[str] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : List[Any] = frozenset(["image", "mask_image"])
__A : List[str] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Tuple = frozenset(["example_image", "image", "mask_image"])
__A : Dict = frozenset(["class_labels"])
__A : str = frozenset(["class_labels"])
__A : str = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : str = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Any = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : List[str] = frozenset(["prompt", "negative_prompt"])
__A : Tuple = frozenset(["input_tokens"])
__A : Optional[int] = frozenset(["input_tokens"])
| 326 | 1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
return getitem, k
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return setitem, k, v
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
return delitem, k
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , *_SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
try:
return fun(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ), None
except Exception as e:
return None, e
__A : Dict = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__A : Optional[int] = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__A : int = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__A : int = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__A : List[str] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__A : str = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
assert my_res == py_res
assert str(_SCREAMING_SNAKE_CASE ) == str(_SCREAMING_SNAKE_CASE )
assert set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
assert set(my.items() ) == set(py.items() )
def lowercase ( ):
'''simple docstring'''
def is_public(_SCREAMING_SNAKE_CASE : str ) -> bool:
return not name.startswith('''_''' )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_SCREAMING_SNAKE_CASE )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_SCREAMING_SNAKE_CASE )}
assert dict_public_names > hash_public_names
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Tuple = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A : List[str] = spec.loader.load_module()
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A : List[str] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 326 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : Optional[int] = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """unispeech-sat"""
def __init__( self : List[Any] , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : str=7_6_8 , __UpperCamelCase : Optional[Any]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : Optional[int]=3_0_7_2 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : Dict=1e-5 , __UpperCamelCase : Tuple="group" , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCamelCase : int=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : Union[str, Any]=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=1_2_8 , __UpperCamelCase : str=1_6 , __UpperCamelCase : Any=False , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=0.0_5 , __UpperCamelCase : int=1_0 , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Dict=1_0 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : str=3_2_0 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Union[str, Any]=1_0_0 , __UpperCamelCase : List[str]=2_5_6 , __UpperCamelCase : Optional[int]=2_5_6 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Optional[int]="mean" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : int=False , __UpperCamelCase : Dict=2_5_6 , __UpperCamelCase : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __UpperCamelCase : str=(5, 3, 3, 1, 1) , __UpperCamelCase : Any=(1, 2, 3, 1, 1) , __UpperCamelCase : Union[str, Any]=5_1_2 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Dict=5_0_4 , **__UpperCamelCase : str , )->int:
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
_UpperCAmelCase = num_clusters
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = feat_quantizer_dropout
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = xvector_output_dim
@property
def lowercase__ ( self : int )->Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 326 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = t // 3600, (t // 60) % 60, t % 60
return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple=300 ):
'''simple docstring'''
return f'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f' <th>{i}</th>\n'
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_UpperCAmelCase = f'{elt:.6f}' if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(_SCREAMING_SNAKE_CASE )
html_code += f' <td>{elt}</td>\n'
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _a :
"""simple docstring"""
UpperCamelCase__ = 5
UpperCamelCase__ = 0.2
def __init__( self : str , __UpperCamelCase : int , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Optional["NotebookTrainingTracker"] = None , __UpperCamelCase : int = 3_0_0 , )->Optional[int]:
_UpperCAmelCase = total
_UpperCAmelCase = '''''' if prefix is None else prefix
_UpperCAmelCase = leave
_UpperCAmelCase = parent
_UpperCAmelCase = width
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
def lowercase__ ( self : int , __UpperCamelCase : int , __UpperCamelCase : bool = False , __UpperCamelCase : str = None )->Union[str, Any]:
_UpperCAmelCase = value
if comment is not None:
_UpperCAmelCase = comment
if self.last_value is None:
_UpperCAmelCase = _UpperCAmelCase = time.time()
_UpperCAmelCase = _UpperCAmelCase = value
_UpperCAmelCase = _UpperCAmelCase = None
_UpperCAmelCase = self.warmup
_UpperCAmelCase = 1
self.update_bar(__UpperCamelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_UpperCAmelCase = time.time()
_UpperCAmelCase = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
_UpperCAmelCase = self.elapsed_time / (value - self.start_value)
else:
_UpperCAmelCase = None
if value >= self.total:
_UpperCAmelCase = self.total
_UpperCAmelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_UpperCAmelCase = self.average_time_per_item * (self.total - value)
self.update_bar(__UpperCamelCase )
_UpperCAmelCase = value
_UpperCAmelCase = current_time
if self.average_time_per_item is None:
_UpperCAmelCase = 1
else:
_UpperCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
def lowercase__ ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any]=None )->int:
_UpperCAmelCase = ''' ''' * (len(str(self.total ) ) - len(str(__UpperCamelCase ) )) + str(__UpperCamelCase )
if self.elapsed_time is None:
_UpperCAmelCase = F'[{spaced_value}/{self.total} : < :'
elif self.predicted_remaining is None:
_UpperCAmelCase = F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
else:
_UpperCAmelCase = (
F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
F' {format_time(self.predicted_remaining )}'
)
self.label += F', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F', {self.comment}]'
self.display()
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=__UpperCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowercase__ ( self : Optional[Any] )->Dict:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=None )->List[str]:
super().__init__(__UpperCamelCase )
_UpperCAmelCase = None if column_names is None else [column_names]
_UpperCAmelCase = None
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=__UpperCamelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def lowercase__ ( self : int , __UpperCamelCase : Optional[int] )->Dict:
if self.inner_table is None:
_UpperCAmelCase = [list(values.keys() ), list(values.values() )]
else:
_UpperCAmelCase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__UpperCamelCase )
_UpperCAmelCase = columns
self.inner_table.append([values[c] for c in columns] )
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Union[str, Any]=3_0_0 )->str:
_UpperCAmelCase = NotebookProgressBar(__UpperCamelCase , prefix=__UpperCamelCase , parent=self , width=__UpperCamelCase )
return self.child_bar
def lowercase__ ( self : int )->List[Any]:
_UpperCAmelCase = None
self.display()
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Tuple )->Tuple:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = False
def lowercase__ ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , **__UpperCamelCase : Any )->List[str]:
_UpperCAmelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
_UpperCAmelCase = NotebookTrainingTracker(state.max_steps , __UpperCamelCase )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , **__UpperCamelCase : Tuple )->str:
_UpperCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else F'{state.epoch:.2f}'
self.training_tracker.update(
state.global_step + 1 , comment=F'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , )
_UpperCAmelCase = False
def lowercase__ ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Dict )->int:
if not has_length(__UpperCamelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_UpperCAmelCase = self.training_tracker.add_child(len(__UpperCamelCase ) )
else:
_UpperCAmelCase = NotebookProgressBar(len(__UpperCamelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] , **__UpperCamelCase : str )->Dict:
if self.prediction_bar is not None:
self.prediction_bar.close()
_UpperCAmelCase = None
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str=None , **__UpperCamelCase : str )->Dict:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_UpperCAmelCase = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
_UpperCAmelCase = state.global_step
self.training_tracker.write_line(__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict=None , **__UpperCamelCase : str )->Any:
if self.training_tracker is not None:
_UpperCAmelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
_UpperCAmelCase = log['''loss''']
break
if self.first_column == "Epoch":
_UpperCAmelCase = int(state.epoch )
else:
_UpperCAmelCase = state.global_step
_UpperCAmelCase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
_UpperCAmelCase = re.sub(r'''\_loss$''' , '''''' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop('''total_flos''' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop('''epoch''' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop(F'{metric_key_prefix}_runtime' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop(F'{metric_key_prefix}_samples_per_second' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop(F'{metric_key_prefix}_steps_per_second' , __UpperCamelCase )
_UpperCAmelCase = metrics.pop(F'{metric_key_prefix}_jit_compilation_time' , __UpperCamelCase )
for k, v in metrics.items():
if k == F'{metric_key_prefix}_loss':
_UpperCAmelCase = v
else:
_UpperCAmelCase = k.split('''_''' )
_UpperCAmelCase = ''' '''.join([part.capitalize() for part in splits[1:]] )
_UpperCAmelCase = v
self.training_tracker.write_line(__UpperCamelCase )
self.training_tracker.remove_child()
_UpperCAmelCase = None
# Evaluation takes a long time so we should force the next update.
_UpperCAmelCase = True
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , **__UpperCamelCase : str )->Any:
self.training_tracker.update(
state.global_step , comment=F'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=__UpperCamelCase )
_UpperCAmelCase = None
| 326 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
__A : int = logging.getLogger(__name__)
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'''repo_id''': str(_SCREAMING_SNAKE_CASE ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''git_log.json''' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , indent=4 )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if params.n_gpu <= 0:
_UpperCAmelCase = 0
_UpperCAmelCase = -1
_UpperCAmelCase = True
_UpperCAmelCase = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
_UpperCAmelCase = int(os.environ['''WORLD_SIZE'''] )
_UpperCAmelCase = int(os.environ['''N_GPU_NODE'''] )
_UpperCAmelCase = int(os.environ['''RANK'''] )
# number of nodes / node ID
_UpperCAmelCase = params.world_size // params.n_gpu_per_node
_UpperCAmelCase = params.global_rank // params.n_gpu_per_node
_UpperCAmelCase = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
_UpperCAmelCase = 1
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
_UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
_UpperCAmelCase = params.n_nodes > 1
# summary
_UpperCAmelCase = f'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 326 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase ( _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for rt in rc.restypes:
_UpperCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_UpperCAmelCase = {name: i for i, name in enumerate(_SCREAMING_SNAKE_CASE )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['''aatype'''].device , )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['''aatype'''].device , )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=protein['''aatype'''].device , )
_UpperCAmelCase = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_UpperCAmelCase = restype_atomaa_mask[protein_aatype]
_UpperCAmelCase = residx_atomaa_mask
_UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_UpperCAmelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
_UpperCAmelCase = rc.restype_atoa[restype_letter]
_UpperCAmelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_UpperCAmelCase = rc.atom_order[atom_name]
_UpperCAmelCase = 1
_UpperCAmelCase = restype_atomaa_mask[protein_aatype]
_UpperCAmelCase = residx_atomaa_mask
return protein
def lowercase ( _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
'''simple docstring'''
_UpperCAmelCase = tree_map(lambda _SCREAMING_SNAKE_CASE : torch.tensor(_SCREAMING_SNAKE_CASE , device=batch['''aatype'''].device ) , _SCREAMING_SNAKE_CASE , np.ndarray )
_UpperCAmelCase = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : np.array(_SCREAMING_SNAKE_CASE ) , make_atomaa_masks(_SCREAMING_SNAKE_CASE ) )
return out
| 326 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
if index == r:
for j in range(_SCREAMING_SNAKE_CASE ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_UpperCAmelCase = arr[i]
combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 , _SCREAMING_SNAKE_CASE , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__A : Optional[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 326 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""image_processor""", """tokenizer"""]
UpperCamelCase__ = """LayoutLMv3ImageProcessor"""
UpperCamelCase__ = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self : List[Any] , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=None , **__UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop('''feature_extractor''' )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCamelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __UpperCamelCase : Union[List[List[int]], List[List[List[int]]]] = None , __UpperCamelCase : Optional[Union[List[int], List[List[int]]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[bool, str, PaddingStrategy] = False , __UpperCamelCase : Union[bool, str, TruncationStrategy] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 0 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[str, TensorType]] = None , **__UpperCamelCase : Dict , )->BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
_UpperCAmelCase = self.image_processor(images=__UpperCamelCase , return_tensors=__UpperCamelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
_UpperCAmelCase = features['''words''']
_UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
# add pixel values
_UpperCAmelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_UpperCAmelCase = self.get_overflowing_images(__UpperCamelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
_UpperCAmelCase = images
return encoded_inputs
def lowercase__ ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] )->List[str]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F' {len(__UpperCamelCase )} and {len(__UpperCamelCase )}' )
return images_with_overflow
def lowercase__ ( self : List[Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Any )->List[Any]:
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : str , *__UpperCamelCase : Any , **__UpperCamelCase : Tuple )->Any:
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def lowercase__ ( self : str )->List[str]:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def lowercase__ ( self : Tuple )->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCamelCase , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple )->Dict:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCamelCase , )
return self.image_processor
| 326 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def lowercase ( _SCREAMING_SNAKE_CASE : Sequence[int] | None = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
_UpperCAmelCase = nums[0]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = nums[i]
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , ans + num , _SCREAMING_SNAKE_CASE )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__A : Any = int(input("Enter number of elements : ").strip())
__A : Optional[Any] = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
"""simple docstring"""
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float = 1E-12 , _SCREAMING_SNAKE_CASE : int = 100 , ):
'''simple docstring'''
assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(_UpperCAmelCase )[0] == np.shape(_UpperCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_UpperCAmelCase ) == np.iscomplexobj(_UpperCAmelCase )
_UpperCAmelCase = np.iscomplexobj(_UpperCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_UpperCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_UpperCAmelCase = False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
_UpperCAmelCase = np.dot(_UpperCAmelCase , _UpperCAmelCase )
# Normalize the resulting output vector.
_UpperCAmelCase = w / np.linalg.norm(_UpperCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_UpperCAmelCase = vector.conj().T if is_complex else vector.T
_UpperCAmelCase = np.dot(_UpperCAmelCase , np.dot(_UpperCAmelCase , _UpperCAmelCase ) )
# Check convergence.
_UpperCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_UpperCAmelCase = True
_UpperCAmelCase = lambda_
if is_complex:
_UpperCAmelCase = np.real(lambda_ )
return lambda_, vector
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_UpperCAmelCase = np.array([41, 4, 20] )
_UpperCAmelCase = real_input_matrix.astype(np.complexaaa )
_UpperCAmelCase = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCAmelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCAmelCase = real_input_matrix
_UpperCAmelCase = real_vector
elif problem_type == "complex":
_UpperCAmelCase = complex_input_matrix
_UpperCAmelCase = complex_vector
# Our implementation.
_UpperCAmelCase = power_iteration(_UpperCAmelCase , _UpperCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_UpperCAmelCase = np.linalg.eigh(_UpperCAmelCase )
# Last eigenvalue is the maximum one.
_UpperCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_UpperCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_UpperCAmelCase ) - np.abs(_UpperCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : int = logging.get_logger(__name__)
__A : List[str] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class _a ( A__ , A__):
"""simple docstring"""
UpperCamelCase__ = "focalnet"
def __init__( self : str , __UpperCamelCase : List[Any]=2_2_4 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Dict=3 , __UpperCamelCase : str=9_6 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Optional[int]=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , __UpperCamelCase : Dict=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[2, 2, 2, 2] , __UpperCamelCase : Optional[int]=[3, 3, 3, 3] , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=4.0 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Tuple=1e-4 , __UpperCamelCase : int=False , __UpperCamelCase : Tuple=False , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : str=1e-5 , __UpperCamelCase : str=3_2 , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Dict , )->List[Any]:
super().__init__(**__A )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = use_conv_embed
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = focal_levels
_UpperCAmelCase = focal_windows
_UpperCAmelCase = hidden_act
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_layerscale
_UpperCAmelCase = layerscale_value
_UpperCAmelCase = use_post_layernorm
_UpperCAmelCase = use_post_layernorm_in_modulation
_UpperCAmelCase = normalize_modulator
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
_UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
| 351 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[Any] )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Tuple )->List[str]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 326 | 0 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
__A : str = "src/transformers"
# Matches is_xxx_available()
__A : Optional[Any] = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__A : List[str] = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A : List[str] = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__A : Optional[Any] = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__A : Optional[Any] = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A : Tuple = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__A : int = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__A : Optional[Any] = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__A : List[Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__A : Optional[int] = re.compile(r"^\s*try:")
# Catches a line with else:
__A : List[Any] = re.compile(r"^\s*else:")
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
'''simple docstring'''
if _re_test_backend.search(__UpperCAmelCase ) is None:
return None
_UpperCAmelCase = [b[0] for b in _re_backend.findall(__UpperCAmelCase )]
backends.sort()
return "_and_".join(__UpperCAmelCase )
def lowercase ( _SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
'''simple docstring'''
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = 0
while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCAmelCase = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_UpperCAmelCase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCAmelCase ):
_UpperCAmelCase = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0]
_UpperCAmelCase = re.findall(r'''\[([^\]]+)\]''' , __UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_UpperCAmelCase = _re_import_struct_key_value.search(__UpperCAmelCase )
if single_line_import_search is not None:
_UpperCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_UpperCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_UpperCAmelCase = lines[line_index]
if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None:
_UpperCAmelCase = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(''', ''' )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_between_brackets.search(__UpperCAmelCase ) is not None:
_UpperCAmelCase = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(''', ''' )
_UpperCAmelCase = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_quote_object.search(__UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCAmelCase = []
while (
line_index < len(__UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCAmelCase = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCAmelCase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCAmelCase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ) -> Dict:
'''simple docstring'''
def find_duplicates(_SCREAMING_SNAKE_CASE : str ):
return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCAmelCase = []
for key in import_dict_objects.keys():
_UpperCAmelCase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
_UpperCAmelCase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCAmelCase = '''base imports''' if key == '''none''' else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def lowercase ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
_UpperCAmelCase = os.path.join(__UpperCAmelCase , '''__init__.py''' )
_UpperCAmelCase = parse_init(__UpperCAmelCase )
if objects is not None:
_UpperCAmelCase = analyze_results(*__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
_UpperCAmelCase = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('''\n'''.join(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
raise ValueError('''\n\n'''.join(__UpperCAmelCase ) )
def lowercase ( ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for path, directories, files in os.walk(__UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(__UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCAmelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_UpperCAmelCase = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) )
_UpperCAmelCase = short_path.replace(os.path.sep , '''.''' )
submodules.append(__UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
_UpperCAmelCase = str((Path(__UpperCAmelCase ) / fname).relative_to(__UpperCAmelCase ) )
_UpperCAmelCase = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(__UpperCAmelCase )
return submodules
__A : str = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def lowercase ( ) -> List[str]:
'''simple docstring'''
from transformers.utils import direct_transformers_import
_UpperCAmelCase = direct_transformers_import(__UpperCAmelCase )
_UpperCAmelCase = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(__UpperCAmelCase , '''__init__.py''' ) , '''r''' ) as f:
_UpperCAmelCase = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , __UpperCAmelCase ) ) )
_UpperCAmelCase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__UpperCAmelCase ) > 0:
_UpperCAmelCase = '''\n'''.join(f'- {module}' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registed in the main init of Transformers:\n'''
f'{list_of_modules}\n'
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 352 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
import pytest
__A : int = "__dummy_dataset1__"
__A : Optional[Any] = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = dataset_loading_script_name
_UpperCAmelCase = tmp_path / '''datasets''' / script_name
script_dir.mkdir(parents=__lowerCAmelCase )
_UpperCAmelCase = script_dir / f'{script_name}.py'
with open(__lowerCAmelCase , '''w''' ) as f:
f.write(__lowerCAmelCase )
return str(__lowerCAmelCase )
| 353 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A : Tuple = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple:
_UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )]
if identifier is not None:
_UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for n_ in n_identifier:
_UpperCAmelCase = [file for file in files if n_ not in file]
else:
_UpperCAmelCase = [file for file in files if n_identifier not in file]
_UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __UpperCamelCase )
if only_modules:
_UpperCAmelCase = file.split('''.''' )[0]
try:
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase )
_UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
_UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''modeling'''
_UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''tokenization'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''configuration'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = Path('''docs/source''' )
_UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Tuple = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class _a ( a__):
"""simple docstring"""
UpperCamelCase__ = """speech_to_text_2"""
UpperCamelCase__ = ["""past_key_values"""]
UpperCamelCase__ = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[Any]=1_0_0_0_0 , __UpperCamelCase : List[Any]=6 , __UpperCamelCase : int=2_0_4_8 , __UpperCamelCase : Dict=4 , __UpperCamelCase : str=0.0 , __UpperCamelCase : int=True , __UpperCamelCase : int="relu" , __UpperCamelCase : Any=2_5_6 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : int=2 , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : Tuple=1_0_2_4 , **__UpperCamelCase : int , )->Dict:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase = max_target_positions
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
| 354 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def lowercase ( _SCREAMING_SNAKE_CASE : bool = True , *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
_UpperCAmelCase = False
if main_process_only:
_UpperCAmelCase = PartialState().local_process_index == 0
return _tqdm(*__a , **__a , disable=__a )
| 355 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCAmelCase = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
_UpperCAmelCase = x_num * y_num
_UpperCAmelCase = x_den * y_num + x_num * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = x_num * x_num * y_num * y_num
_UpperCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
__A : List[str] = """Hello, World!"""
__A : Any = """en_XX"""
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool ):
'''simple docstring'''
_UpperCAmelCase = Path('''data_bin''' )
_UpperCAmelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__snake_case ).parent ) , checkpoint_file=Path(__snake_case ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(__snake_case ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(__snake_case ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(__snake_case )
_UpperCAmelCase = xmod.model.encoder.sentence_encoder
_UpperCAmelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , __snake_case )
_UpperCAmelCase = XmodForSequenceClassification(__snake_case ) if classification_head else XmodForMaskedLM(__snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
_UpperCAmelCase = xmod_sent_encoder.embed_tokens.weight
_UpperCAmelCase = xmod_sent_encoder.embed_positions.weight
_UpperCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.weight
_UpperCAmelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_UpperCAmelCase = model.roberta.encoder.layer[i]
_UpperCAmelCase = xmod_sent_encoder.layers[i]
# self attention
_UpperCAmelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
_UpperCAmelCase = xmod_layer.self_attn.q_proj.weight
_UpperCAmelCase = xmod_layer.self_attn.q_proj.bias
_UpperCAmelCase = xmod_layer.self_attn.k_proj.weight
_UpperCAmelCase = xmod_layer.self_attn.k_proj.bias
_UpperCAmelCase = xmod_layer.self_attn.v_proj.weight
_UpperCAmelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
_UpperCAmelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
_UpperCAmelCase = xmod_layer.self_attn.out_proj.weight
_UpperCAmelCase = xmod_layer.self_attn.out_proj.bias
_UpperCAmelCase = xmod_layer.self_attn_layer_norm.weight
_UpperCAmelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
_UpperCAmelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
_UpperCAmelCase = xmod_layer.fca.weight
_UpperCAmelCase = xmod_layer.fca.bias
# output
_UpperCAmelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
_UpperCAmelCase = xmod_layer.fca.weight
_UpperCAmelCase = xmod_layer.fca.bias
_UpperCAmelCase = xmod_layer.final_layer_norm.weight
_UpperCAmelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_UpperCAmelCase = xmod_layer.adapter_layer_norm.weight
_UpperCAmelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_UpperCAmelCase = bert_output.adapter_modules[lang_code]
_UpperCAmelCase = xmod_layer.adapter_modules[lang_code]
_UpperCAmelCase = from_adapter.fca.weight
_UpperCAmelCase = from_adapter.fca.bias
_UpperCAmelCase = from_adapter.fca.weight
_UpperCAmelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_UpperCAmelCase = xmod_sent_encoder.layer_norm.weight
_UpperCAmelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.weight
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''].dense.bias
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_UpperCAmelCase = xmod.model.encoder.lm_head.dense.weight
_UpperCAmelCase = xmod.model.encoder.lm_head.dense.bias
_UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight
_UpperCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias
_UpperCAmelCase = xmod.model.encoder.lm_head.weight
_UpperCAmelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_UpperCAmelCase = xmod.encode(__snake_case ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__snake_case )
_UpperCAmelCase = model(__snake_case )[0]
if classification_head:
_UpperCAmelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(__snake_case ) )
else:
_UpperCAmelCase = xmod.model(__snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
_UpperCAmelCase = torch.allclose(__snake_case , __snake_case , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(__snake_case ).mkdir(parents=__snake_case , exist_ok=__snake_case )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__A : int = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 356 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''MLukeTokenizer'''
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['''entity_predictions.bias''']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' )
_UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''Tokyo is the capital of <mask>.'''
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = encoding['''input_ids'''][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = f'{language}:{entity_name}'
_UpperCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = os.path.abspath(__A )
logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
_UpperCAmelCase = tf.train.list_variables(__A )
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_UpperCAmelCase = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(f'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
_UpperCAmelCase = name[1:]
# figure out how many levels deep the name is
_UpperCAmelCase = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(__A )
# read data
_UpperCAmelCase = tf.train.load_variable(__A , __A )
names.append('''/'''.join(__A ) )
arrays.append(__A )
logger.info(f'Read a total of {len(__A ):,} layers' )
# Sanity check
if len(set(__A ) ) != 1:
raise ValueError(f'Found layer names with different depths (layer depth {list(set(__A ) )})' )
_UpperCAmelCase = list(set(__A ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(__A , __A ):
_UpperCAmelCase = full_name.split('''/''' )
_UpperCAmelCase = model
_UpperCAmelCase = []
for i, m_name in enumerate(__A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
_UpperCAmelCase = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
_UpperCAmelCase = getattr(__A , '''embeddings''' )
_UpperCAmelCase = getattr(__A , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
_UpperCAmelCase = getattr(__A , '''encoder''' )
_UpperCAmelCase = getattr(__A , '''layer''' )
_UpperCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
_UpperCAmelCase = getattr(__A , '''pooler''' )
_UpperCAmelCase = getattr(__A , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
_UpperCAmelCase = getattr(__A , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
_UpperCAmelCase = getattr(__A , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
_UpperCAmelCase = getattr(__A , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
_UpperCAmelCase = getattr(__A , '''token_type_embeddings''' )
else:
raise ValueError(f'Unknown embedding layer with name {full_name}' )
trace.append('''weight''' )
_UpperCAmelCase = getattr(__A , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
_UpperCAmelCase = getattr(__A , '''attention''' )
_UpperCAmelCase = getattr(__A , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
_UpperCAmelCase = getattr(__A , '''attention''' )
_UpperCAmelCase = getattr(__A , '''output''' )
_UpperCAmelCase = getattr(__A , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
_UpperCAmelCase = getattr(__A , '''attention''' )
_UpperCAmelCase = getattr(__A , '''output''' )
_UpperCAmelCase = getattr(__A , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
_UpperCAmelCase = getattr(__A , '''output''' )
_UpperCAmelCase = getattr(__A , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
_UpperCAmelCase = getattr(__A , '''output''' )
_UpperCAmelCase = getattr(__A , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
_UpperCAmelCase = getattr(__A , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
_UpperCAmelCase = getattr(__A , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
_UpperCAmelCase = getattr(__A , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
_UpperCAmelCase = getattr(__A , '''intermediate''' )
_UpperCAmelCase = getattr(__A , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
_UpperCAmelCase = getattr(__A , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
_UpperCAmelCase = getattr(__A , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
_UpperCAmelCase = getattr(__A , '''weight''' )
else:
logger.warning(f'Ignored {m_name}' )
# for certain layers reshape is necessary
_UpperCAmelCase = '''.'''.join(__A )
if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , __A ) or re.match(
r'''(\S+)\.attention\.output\.dense\.weight''' , __A ):
_UpperCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_UpperCAmelCase = array.transpose()
if pointer.shape == array.shape:
_UpperCAmelCase = torch.from_numpy(__A )
else:
raise ValueError(
f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
f' {array.shape}' )
logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
logger.info(f'Loading model based on config from {config_path}...' )
_UpperCAmelCase = BertConfig.from_json_file(__A )
_UpperCAmelCase = BertModel(__A )
# Load weights from checkpoint
logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tfa_weights_in_bert(__A , __A , __A )
# Save pytorch-model
logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
__A : Optional[Any] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : Tuple = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
_UpperCAmelCase = True
while ask_again:
_UpperCAmelCase = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ):
'''simple docstring'''
_UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[str] = logging.get_logger("transformers.models.speecht5")
__A : Any = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
__A : Dict = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
__A : int = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
__A : List[str] = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
__A : List[str] = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
__A : Any = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
__A : Any = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
__A : Any = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
__A : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A : Any = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Union[str, Any] = []
__A : Union[str, Any] = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
__A : Any = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
__A : List[Any] = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
__A : str = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split('''.''' ):
_UpperCAmelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
_UpperCAmelCase = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
elif weight_type == "running_mean":
_UpperCAmelCase = value
elif weight_type == "running_var":
_UpperCAmelCase = value
elif weight_type == "num_batches_tracked":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_UpperCAmelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = []
if task == "s2t":
_UpperCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
_UpperCAmelCase = MAPPING_S2T
_UpperCAmelCase = IGNORE_KEYS_S2T
elif task == "t2s":
_UpperCAmelCase = None
_UpperCAmelCase = MAPPING_T2S
_UpperCAmelCase = IGNORE_KEYS_T2S
elif task == "s2s":
_UpperCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
_UpperCAmelCase = MAPPING_S2S
_UpperCAmelCase = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(lowerCamelCase_ , lowerCamelCase_ ):
logger.info(f'{name} was ignored' )
continue
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_UpperCAmelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
_UpperCAmelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(lowerCamelCase_ )[0].split('''.''' )[-2]
_UpperCAmelCase = mapped_key.replace('''*''' , lowerCamelCase_ )
if "weight_g" in name:
_UpperCAmelCase = """weight_g"""
elif "weight_v" in name:
_UpperCAmelCase = """weight_v"""
elif "bias" in name:
_UpperCAmelCase = """bias"""
elif "weight" in name:
_UpperCAmelCase = """weight"""
elif "running_mean" in name:
_UpperCAmelCase = """running_mean"""
elif "running_var" in name:
_UpperCAmelCase = """running_var"""
elif "num_batches_tracked" in name:
_UpperCAmelCase = """num_batches_tracked"""
else:
_UpperCAmelCase = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f'Unused weights: {unused_weights}' )
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCAmelCase = name.split('''.''' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_UpperCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_UpperCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = SpeechTaConfig.from_pretrained(lowerCamelCase_ )
else:
_UpperCAmelCase = SpeechTaConfig()
if task == "s2t":
_UpperCAmelCase = config.max_text_positions
_UpperCAmelCase = SpeechTaForSpeechToText(lowerCamelCase_ )
elif task == "t2s":
_UpperCAmelCase = 1876
_UpperCAmelCase = 600
_UpperCAmelCase = config.max_speech_positions
_UpperCAmelCase = SpeechTaForTextToSpeech(lowerCamelCase_ )
elif task == "s2s":
_UpperCAmelCase = 1876
_UpperCAmelCase = config.max_speech_positions
_UpperCAmelCase = SpeechTaForSpeechToSpeech(lowerCamelCase_ )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
_UpperCAmelCase = SpeechTaTokenizer(lowerCamelCase_ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken('''<mask>''' , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )
_UpperCAmelCase = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
_UpperCAmelCase = SpeechTaFeatureExtractor()
_UpperCAmelCase = SpeechTaProcessor(tokenizer=lowerCamelCase_ , feature_extractor=lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
_UpperCAmelCase = torch.load(lowerCamelCase_ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCamelCase_ , lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCamelCase_ )
model.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A : Any = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 358 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
__A : Union[str, Any] = load("accuracy")
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None:
super().__init__()
_UpperCAmelCase = trainer
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(__UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE : Any ):
_UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
_UpperCAmelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print('''Training...''' )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 326 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : str = logging.get_logger(__name__)
__A : Dict = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class _a ( __A):
"""simple docstring"""
UpperCamelCase__ = 'imagegpt'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Any , __UpperCamelCase : int=5_1_2 + 1 , __UpperCamelCase : Dict=3_2 * 3_2 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Optional[Any]=2_4 , __UpperCamelCase : Tuple=8 , __UpperCamelCase : int=None , __UpperCamelCase : Optional[int]="quick_gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Dict=1e-5 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : str=False , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Union[str, Any] , )->List[str]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = n_inner
_UpperCAmelCase = activation_function
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scale_attn_weights
_UpperCAmelCase = use_cache
_UpperCAmelCase = scale_attn_by_inverse_layer_idx
_UpperCAmelCase = reorder_and_upcast_attn
_UpperCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowercase , **__lowercase )
class _a ( __A):
"""simple docstring"""
@property
def lowercase__ ( self : Union[str, Any] )->List[Any]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] = 1 , __UpperCamelCase : Dict = -1 , __UpperCamelCase : int = False , __UpperCamelCase : List[Any] = None , __UpperCamelCase : List[Any] = 3 , __UpperCamelCase : List[str] = 3_2 , __UpperCamelCase : Any = 3_2 , )->Any:
_UpperCAmelCase = self._generate_dummy_images(__lowercase , __lowercase , __lowercase , __lowercase )
_UpperCAmelCase = dict(preprocessor(images=__lowercase , return_tensors=__lowercase ) )
return inputs
| 359 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 326 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) )
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any]=7_0000 ):
'''simple docstring'''
_UpperCAmelCase = np.zeros(x.shape[1] )
for iterations in range(__lowerCamelCase ):
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase = sigmoid_function(__lowerCamelCase )
_UpperCAmelCase = np.dot(x.T , h - y ) / y.size
_UpperCAmelCase = theta - alpha * gradient # updating the weights
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase = sigmoid_function(__lowerCamelCase )
_UpperCAmelCase = cost_function(__lowerCamelCase , __lowerCamelCase )
if iterations % 100 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
__A : Any = datasets.load_iris()
__A : List[Any] = iris.data[:, :2]
__A : Tuple = (iris.target != 0) * 1
__A : Optional[int] = 0.1
__A : Optional[Any] = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return sigmoid_function(
np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((__A) , (__A)) : Optional[int] = (x[:, 0].min(), x[:, 0].max())
((__A) , (__A)) : List[str] = (x[:, 1].min(), x[:, 1].max())
((__A) , (__A)) : str = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__A : List[Any] = np.c_[xxa.ravel(), xxa.ravel()]
__A : Any = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 360 |
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A : Dict = logging.get_logger(__name__)
class _a ( __SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __init__( self : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] )->List[str]:
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
from collections import namedtuple
__A : Any = namedtuple("from_to", "from_ to")
__A : Tuple = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ ''', '''.join(lowercase__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ''', '''.join(lowercase__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
"""simple docstring"""
__A : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
__A : str = frozenset([])
__A : List[str] = frozenset(["image"])
__A : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[int] = frozenset(["image"])
__A : Optional[int] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"])
__A : str = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[str] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : List[Any] = frozenset(["image", "mask_image"])
__A : List[str] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Tuple = frozenset(["example_image", "image", "mask_image"])
__A : Dict = frozenset(["class_labels"])
__A : str = frozenset(["class_labels"])
__A : str = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : str = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Any = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : List[str] = frozenset(["prompt", "negative_prompt"])
__A : Tuple = frozenset(["input_tokens"])
__A : Optional[int] = frozenset(["input_tokens"])
| 326 | 0 |
from collections.abc import Callable
class _a :
"""simple docstring"""
def __init__( self : List[Any] , __UpperCamelCase : Callable | None = None )->None:
_UpperCAmelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCAmelCase = {}
# Stores current size of heap.
_UpperCAmelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCAmelCase = key or (lambda __UpperCamelCase : x)
def lowercase__ ( self : Any , __UpperCamelCase : int )->int | None:
return int((i - 1) / 2 ) if i > 0 else None
def lowercase__ ( self : Optional[int] , __UpperCamelCase : int )->int | None:
_UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowercase__ ( self : Tuple , __UpperCamelCase : int )->int | None:
_UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def lowercase__ ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int )->None:
_UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCAmelCase = self.arr[j], self.arr[i]
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int )->bool:
return self.arr[i][1] < self.arr[j][1]
def lowercase__ ( self : Dict , __UpperCamelCase : int )->int:
_UpperCAmelCase = self._left(__UpperCamelCase )
_UpperCAmelCase = self._right(__UpperCamelCase )
_UpperCAmelCase = i
if left is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = left
if right is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = right
return valid_parent
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->None:
_UpperCAmelCase = self._parent(__UpperCamelCase )
while parent is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
self._swap(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parent, self._parent(__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : int )->None:
_UpperCAmelCase = self._get_valid_parent(__UpperCamelCase )
while valid_parent != index:
self._swap(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = valid_parent, self._get_valid_parent(__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int )->None:
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
_UpperCAmelCase = [item, self.key(__UpperCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__UpperCamelCase )
self._heapify_down(__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : int )->None:
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCAmelCase = self.arr[self.size - 1]
_UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__UpperCamelCase )
self._heapify_down(__UpperCamelCase )
def lowercase__ ( self : str , __UpperCamelCase : int , __UpperCamelCase : int )->None:
_UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__UpperCamelCase )] )
else:
_UpperCAmelCase = [item, self.key(__UpperCamelCase )]
_UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowercase__ ( self : str )->tuple | None:
return self.arr[0] if self.size else None
def lowercase__ ( self : Optional[Any] )->tuple | None:
_UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowercase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = args.log_outputs
_UpperCAmelCase = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
_UpperCAmelCase = load_metric('''wer''' )
_UpperCAmelCase = load_metric('''cer''' )
# compute metrics
_UpperCAmelCase = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
_UpperCAmelCase = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
_UpperCAmelCase = f'WER: {wer_result}\nCER: {cer_result}'
print(a_ )
with open(f'{dataset_id}_eval_results.txt' , '''w''' ) as f:
f.write(a_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
_UpperCAmelCase = f'log_{dataset_id}_predictions.txt'
_UpperCAmelCase = f'log_{dataset_id}_targets.txt'
with open(a_ , '''w''' ) as p, open(a_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
p.write(f'{i}' + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f'{i}' + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(a_ , with_indices=a_ )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
_UpperCAmelCase = re.sub(a_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
_UpperCAmelCase = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
_UpperCAmelCase = ''' '''.join(text.split(a_ ) )
return text
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(args.model_id )
_UpperCAmelCase = feature_extractor.sampling_rate
# resample audio
_UpperCAmelCase = dataset.cast_column('''audio''' , Audio(sampling_rate=a_ ) )
# load eval pipeline
if args.device is None:
_UpperCAmelCase = 0 if torch.cuda.is_available() else -1
_UpperCAmelCase = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_SCREAMING_SNAKE_CASE : Optional[int] ):
_UpperCAmelCase = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
_UpperCAmelCase = prediction['''text''']
_UpperCAmelCase = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
_UpperCAmelCase = dataset.map(a_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(a_ , a_ )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
__A : List[Any] = parser.parse_args()
main(args)
| 364 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Tuple = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A : List[str] = spec.loader.load_module()
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A : List[str] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 326 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__A : Union[str, Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
__A : List[Any] = parser.parse_args()
__A : List[str] = "cpu"
__A : Dict = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
__A : int = "path-to-your-trained-model"
__A : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__A : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__A : List[str] = pipe.to(device)
# to channels last
__A : List[Any] = pipe.unet.to(memory_format=torch.channels_last)
__A : Dict = pipe.vae.to(memory_format=torch.channels_last)
__A : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__A : str = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__A : Union[str, Any] = torch.randn(2, 4, 64, 64)
__A : List[Any] = torch.rand(1) * 999
__A : str = torch.randn(2, 77, 768)
__A : str = (sample, timestep, encoder_hidden_status)
try:
__A : Dict = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__A : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__A : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__A : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__A : Dict = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__A : Dict = 666
__A : Tuple = torch.Generator(device).manual_seed(seed)
__A : List[str] = {"generator": generator}
if args.steps is not None:
__A : Union[str, Any] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__A : Dict = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 365 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Dict = logging.getLogger(__name__)
if __name__ == "__main__":
__A : str = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30522, type=int)
__A : Dict = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, "rb") as fp:
__A : List[str] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A : Any = [0] * args.vocab_size
for k, v in counter.items():
__A : Optional[Any] = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 366 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[str]=9_9 , __UpperCamelCase : int=3_2 , __UpperCamelCase : str=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=3_7 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : int=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=None , )->Union[str, Any]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int )->str:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowercase__ ( self : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict )->str:
_UpperCAmelCase = DistilBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = model(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] )->Dict:
_UpperCAmelCase = DistilBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Optional[Any]:
_UpperCAmelCase = DistilBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : str )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DistilBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->List[str]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DistilBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] )->Optional[int]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = DistilBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = self.prepare_config_and_inputs()
(_UpperCAmelCase) = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def lowercase__ ( self : Tuple )->List[str]:
_UpperCAmelCase = DistilBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , dim=3_7 )
def lowercase__ ( self : Union[str, Any] )->Any:
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ )
def lowercase__ ( self : Tuple )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ )
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ )
def lowercase__ ( self : Dict )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ )
def lowercase__ ( self : Tuple )->List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ )
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ )
@slow
def lowercase__ ( self : Optional[Any] )->str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DistilBertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@slow
@require_torch_gpu
def lowercase__ ( self : Optional[int] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCAmelCase = True
_UpperCAmelCase = model_class(config=lowerCAmelCase__ )
_UpperCAmelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase = torch.jit.trace(
lowerCAmelCase__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , '''traced_model.pt''' ) )
_UpperCAmelCase = torch.jit.load(os.path.join(lowerCAmelCase__ , '''traced_model.pt''' ) , map_location=lowerCAmelCase__ )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase__ ) , inputs_dict['''attention_mask'''].to(lowerCAmelCase__ ) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Optional[Any]:
_UpperCAmelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_UpperCAmelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
_UpperCAmelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase__ )
_UpperCAmelCase = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
| 367 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from __future__ import annotations
__A : Optional[int] = list[tuple[int, int]]
__A : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__A : Tuple = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _a :
"""simple docstring"""
def __init__( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , )->List[str]:
_UpperCAmelCase = pos_x
_UpperCAmelCase = pos_y
_UpperCAmelCase = (pos_y, pos_x)
_UpperCAmelCase = goal_x
_UpperCAmelCase = goal_y
_UpperCAmelCase = g_cost
_UpperCAmelCase = parent
_UpperCAmelCase = self.calculate_heuristic()
def lowercase__ ( self : int )->float:
_UpperCAmelCase = abs(self.pos_x - self.goal_x )
_UpperCAmelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : List[Any] , __UpperCamelCase : str )->bool:
return self.f_cost < other.f_cost
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] )->List[str]:
_UpperCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [self.start]
_UpperCAmelCase = []
_UpperCAmelCase = False
def lowercase__ ( self : Any )->Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_UpperCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_UpperCAmelCase = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
_UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Any )->list[Node]:
_UpperCAmelCase = []
for action in delta:
_UpperCAmelCase = parent.pos_x + action[1]
_UpperCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def lowercase__ ( self : List[Any] , __UpperCamelCase : str )->Path:
_UpperCAmelCase = node
_UpperCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_UpperCAmelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__A : Tuple = (0, 0)
__A : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
__A : Optional[Any] = GreedyBestFirst(init, goal)
__A : str = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__A : Optional[Any] = 2
for elem in grid:
print(elem)
| 368 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _a ( a__):
"""simple docstring"""
UpperCamelCase__ = 'philschmid/bart-large-cnn-samsum'
UpperCamelCase__ = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
UpperCamelCase__ = 'summarizer'
UpperCamelCase__ = AutoTokenizer
UpperCamelCase__ = AutoModelForSeqaSeqLM
UpperCamelCase__ = ['text']
UpperCamelCase__ = ['text']
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] )->Tuple:
return self.pre_processor(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : str )->List[Any]:
return self.model.generate(**_lowerCamelCase )[0]
def lowercase__ ( self : str , __UpperCamelCase : str )->Tuple:
return self.pre_processor.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
| 369 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[str] = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class _a ( __UpperCamelCase):
"""simple docstring"""
UpperCamelCase__ = "xlm"
UpperCamelCase__ = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , __UpperCamelCase : Optional[Any]=3_0_1_4_5 , __UpperCamelCase : Optional[Any]=2_0_4_8 , __UpperCamelCase : Dict=1_2 , __UpperCamelCase : int=1_6 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : str=True , __UpperCamelCase : List[str]=False , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False , __UpperCamelCase : Dict=1 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : List[Any]=2_0_4_8**-0.5 , __UpperCamelCase : List[str]=1e-12 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Tuple="first" , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Dict=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : Tuple , )->Optional[Any]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = emb_dim
_UpperCAmelCase = n_layers
_UpperCAmelCase = n_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = use_lang_emb
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = bos_index
_UpperCAmelCase = eos_index
_UpperCAmelCase = pad_index
_UpperCAmelCase = unk_index
_UpperCAmelCase = mask_index
_UpperCAmelCase = is_encoder
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = embed_init_std
_UpperCAmelCase = init_std
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_proj_to_labels
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = start_n_top
_UpperCAmelCase = end_n_top
_UpperCAmelCase = mask_token_id
_UpperCAmelCase = lang_id
if "n_words" in kwargs:
_UpperCAmelCase = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class _a ( __UpperCamelCase):
"""simple docstring"""
@property
def lowercase__ ( self : Dict )->Optional[int]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 370 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = prime_factors(UpperCAmelCase__ )
if is_square_free(UpperCAmelCase__ ):
return -1 if len(UpperCAmelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( __lowercase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = DDIMPipeline
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCamelCase__ = False
def lowercase__ ( self : Any )->List[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {'''unet''': unet, '''scheduler''': scheduler}
return components
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int=0 )->List[Any]:
if str(_a ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_a )
else:
_UpperCAmelCase = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = '''cpu'''
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase = self.get_dummy_inputs(_a )
_UpperCAmelCase = pipe(**_a ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
_UpperCAmelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def lowercase__ ( self : Any )->Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase__ ( self : Any )->Dict:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase__ ( self : int )->Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : int )->Optional[int]:
_UpperCAmelCase = '''google/ddpm-cifar10-32'''
_UpperCAmelCase = UNetaDModel.from_pretrained(_a )
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = DDIMPipeline(unet=_a , scheduler=_a )
ddim.to(_a )
ddim.set_progress_bar_config(disable=_a )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddim(generator=_a , eta=0.0 , output_type='''numpy''' ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_UpperCAmelCase = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[int] )->Optional[Any]:
_UpperCAmelCase = '''google/ddpm-ema-bedroom-256'''
_UpperCAmelCase = UNetaDModel.from_pretrained(_a )
_UpperCAmelCase = DDIMScheduler.from_pretrained(_a )
_UpperCAmelCase = DDIMPipeline(unet=_a , scheduler=_a )
ddpm.to(_a )
ddpm.set_progress_bar_config(disable=_a )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ddpm(generator=_a , output_type='''numpy''' ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_UpperCAmelCase = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__A : Optional[int] = logging.get_logger(__name__)
class _a ( lowercase__):
"""simple docstring"""
def __init__( self : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : Any )->int:
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 351 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[Any] )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Tuple )->List[str]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 326 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : List[str] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_snake_case ) != count_coins(_snake_case ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_snake_case )
+ abs(_snake_case )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_snake_case , _snake_case )
return get_distrib(_snake_case )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__A : List[Any] = 50000
__A : Dict = 5000
__A : Optional[Any] = os.path.split(__file__)
__A : Dict = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
for i in range(_A ):
_UpperCAmelCase = dataset[i]
@get_duration
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
for i in range(0 , len(_A ) , _A ):
_UpperCAmelCase = dataset[i : i + batch_size]
@get_duration
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
with dataset.formatted_as(type=_A ):
for i in range(_A ):
_UpperCAmelCase = dataset[i]
@get_duration
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
with dataset.formatted_as(type=_A ):
for i in range(0 , _A , _A ):
_UpperCAmelCase = dataset[i : i + batch_size]
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = {'num examples': SPEED_TEST_N_EXAMPLES}
_UpperCAmelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
_UpperCAmelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
_UpperCAmelCase = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
_UpperCAmelCase = generate_example_dataset(
os.path.join(_A , '''dataset.arrow''' ) , _A , num_examples=_A , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(_A ) )
_UpperCAmelCase = func(_A , **_A )
print('''shuffling dataset''' )
_UpperCAmelCase = dataset.shuffle()
print('''Second set of iterations (after shuffling''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(_A ) )
_UpperCAmelCase = func(
_A , **_A )
with open(_A , '''wb''' ) as f:
f.write(json.dumps(_A ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 353 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A : Tuple = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple:
_UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )]
if identifier is not None:
_UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for n_ in n_identifier:
_UpperCAmelCase = [file for file in files if n_ not in file]
else:
_UpperCAmelCase = [file for file in files if n_identifier not in file]
_UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __UpperCamelCase )
if only_modules:
_UpperCAmelCase = file.split('''.''' )[0]
try:
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase )
_UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
_UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''modeling'''
_UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''tokenization'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''configuration'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = Path('''docs/source''' )
_UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase__ = Features({"""audio""": Audio()})
UpperCamelCase__ = Features({"""transcription""": Value("""string""")})
UpperCamelCase__ = """audio"""
UpperCamelCase__ = """transcription"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Dict )->Optional[int]:
if self.audio_column not in features:
raise ValueError(F'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , __UpperCAmelCase ):
raise ValueError(F'Column {self.audio_column} is not an Audio type.' )
_UpperCAmelCase = copy.deepcopy(self )
_UpperCAmelCase = self.input_schema.copy()
_UpperCAmelCase = features[self.audio_column]
_UpperCAmelCase = input_schema
return task_template
@property
def lowercase__ ( self : Optional[Any] )->Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 354 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _a ( _UpperCAmelCase):
"""simple docstring"""
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : List[Any] )->Any:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowercase__ ( self : str )->List[Any]:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def lowercase__ ( self : Dict )->Optional[Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : Union[str, Any] )->List[str]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def lowercase__ ( self : Optional[int] )->Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def lowercase__ ( self : List[Any] )->Any:
_UpperCAmelCase = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowercase__ ( self : Optional[int] )->List[str]:
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , SCREAMING_SNAKE_CASE_ )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferReader(snake_case_ ) if isinstance(snake_case_ , pa.Buffer ) else pa.memory_map(snake_case_ )
_UpperCAmelCase = pa.ipc.open_stream(snake_case_ )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ , schema=snake_case_ , writer_batch_size=snake_case_ ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=snake_case_ , features=snake_case_ ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(snake_case_ )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(snake_case_ )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ , writer_batch_size=snake_case_ , hash_salt='''split_name''' , check_duplicates=snake_case_ , ) as writer:
with pytest.raises(snake_case_ ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ , writer_batch_size=snake_case_ , hash_salt='''split_name''' , check_duplicates=snake_case_ , ) as writer:
with pytest.raises(snake_case_ ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def lowercase ( _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=snake_case_ , writer_batch_size=snake_case_ , hash_salt='''split_name''' , check_duplicates=snake_case_ , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ , schema=snake_case_ , writer_batch_size=snake_case_ ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ , schema=snake_case_ , writer_batch_size=snake_case_ ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(snake_case_ ) if fields else None
with ArrowWriter(stream=snake_case_ , schema=snake_case_ , writer_batch_size=snake_case_ ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(snake_case_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowercase ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(snake_case_ , '''test.arrow''' )
with ArrowWriter(path=snake_case_ , schema=pa.schema(snake_case_ ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(snake_case_ , metadata=writer._schema.metadata )
_check_output(snake_case_ , 1 )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if pa.types.is_list(snake_case_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if isinstance(lst[0] , snake_case_ ):
change_first_primitive_element_in_list(lst[0] , snake_case_ )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(snake_case_ , optimized_int_type=snake_case_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = pa.array(OptimizedTypedSequence(snake_case_ , col=snake_case_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(snake_case_ )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(snake_case_ , snake_case_ )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(snake_case_ , col=snake_case_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=snake_case_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=snake_case_ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(snake_case_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(snake_case_ )
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=snake_case_ ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(snake_case_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(snake_case_ , format='''png''' )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=snake_case_ , features=Features({'''image''': Image()} ) , embed_local_files=snake_case_ ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(snake_case_ )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , snake_case_ )
with open(snake_case_ , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = pa.schema([pa.field('''col_1''' , pa.string() , nullable=snake_case_ )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=snake_case_ ) as writer:
writer._build_writer(inferred_schema=snake_case_ )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
| 355 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(number**0.5 )
return number == sq * sq
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_UpperCAmelCase = x_den * y_den * z_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
def lowercase ( _SCREAMING_SNAKE_CASE : int = 35 ):
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = 42
_UpperCAmelCase = Fraction(0 )
_UpperCAmelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_UpperCAmelCase = x_num * y_den + x_den * y_num
_UpperCAmelCase = x_den * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_UpperCAmelCase = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
_UpperCAmelCase = x_num * y_num
_UpperCAmelCase = x_den * y_num + x_num * y_den
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
_UpperCAmelCase = x_num * x_num * y_num * y_num
_UpperCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = int(sqrt(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_UpperCAmelCase = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 326 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__A : Dict = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
__A : Any = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
__A : str = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
__A : Optional[int] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
__A : Optional[Any] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
__A : Optional[int] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
__A : Tuple = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = randrange(len(lowercase__ ) ), randrange(len(lowercase__ ) )
_UpperCAmelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
_UpperCAmelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase ( _SCREAMING_SNAKE_CASE : int = 100 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowercase__ ))
@pytest.mark.parametrize('''hand, expected''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , lowercase__ )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = [PokerHand(lowercase__ ) for hand in SORTED_HANDS]
_UpperCAmelCase = poker_hands.copy()
shuffle(lowercase__ )
_UpperCAmelCase = chain(sorted(lowercase__ ) )
for index, hand in enumerate(lowercase__ ):
assert hand == poker_hands[index]
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=lowercase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = PokerHand('''2C 4S AS 3D 5C''' )
_UpperCAmelCase = True
_UpperCAmelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = os.path.abspath(os.path.dirname(lowercase__ ) )
_UpperCAmelCase = os.path.join(lowercase__ , '''poker_hands.txt''' )
with open(lowercase__ ) as file_hand:
for line in file_hand:
_UpperCAmelCase = line[:14].strip()
_UpperCAmelCase = line[15:].strip()
_UpperCAmelCase = PokerHand(lowercase__ ), PokerHand(lowercase__ )
_UpperCAmelCase = player.compare_with(lowercase__ )
if output == "Win":
answer += 1
assert answer == 376
| 356 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''module''']
# Load the entity vocab file
_UpperCAmelCase = load_original_entity_vocab(_SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
_UpperCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCAmelCase = AddedToken('''<ent>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''r''' ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''MLukeTokenizer'''
with open(os.path.join(_SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCAmelCase = state_dict[bias_name]
_UpperCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCAmelCase = f'encoder.layer.{layer_index}.attention.self.'
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
_UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCAmelCase = state_dict['''entity_predictions.bias''']
_UpperCAmelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCAmelCase = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_UpperCAmelCase = state_dict[key]
else:
_UpperCAmelCase = state_dict[key]
_UpperCAmelCase , _UpperCAmelCase = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(_SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task='''entity_classification''' )
_UpperCAmelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_UpperCAmelCase = (0, 9)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 33, 768) )
_UpperCAmelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCAmelCase = torch.Size((1, 1, 768) )
_UpperCAmelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCAmelCase = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''Tokyo is the capital of <mask>.'''
_UpperCAmelCase = (24, 30)
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors='''pt''' )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = encoding['''input_ids'''][0].tolist()
_UpperCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_UpperCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_UpperCAmelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )]
_UpperCAmelCase = {}
for entry in data:
_UpperCAmelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCAmelCase = entity_id
break
_UpperCAmelCase = f'{language}:{entity_name}'
_UpperCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 326 | 0 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return abs(A__ ) if a == 0 else greatest_common_divisor(b % a , A__ )
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while y: # --> when y=0 then loop will terminate and return x as final GCD.
_UpperCAmelCase , _UpperCAmelCase = y, x % y
return abs(A__ )
def lowercase ( ):
'''simple docstring'''
try:
_UpperCAmelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
_UpperCAmelCase = int(nums[0] )
_UpperCAmelCase = int(nums[1] )
print(
f'greatest_common_divisor({num_a}, {num_a}) = '
f'{greatest_common_divisor(A__ , A__ )}' )
print(f'By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(A__ , A__ )}' )
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : Tuple = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
_UpperCAmelCase = True
while ask_again:
_UpperCAmelCase = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ):
'''simple docstring'''
_UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _a ( argparse.RawDescriptionHelpFormatter):
"""simple docstring"""
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 326 | 0 |
"""simple docstring"""
import argparse
import json
import subprocess
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
_UpperCAmelCase = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
_UpperCAmelCase = output.stdout.decode('''utf-8''' )
_UpperCAmelCase = json.loads(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_SCREAMING_SNAKE_CASE )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
return values.split(''',''' )
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__A : int = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 358 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
__A : Union[str, Any] = load("accuracy")
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None:
super().__init__()
_UpperCAmelCase = trainer
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(__UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE : Any ):
_UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
_UpperCAmelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print('''Training...''' )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 326 | 0 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_UpperCAmelCase , _UpperCAmelCase = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__A : List[str] = list(range(10, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 359 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "\n".join(
f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 326 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ['image_processor', 'tokenizer']
UpperCamelCase__ = 'CLIPImageProcessor'
UpperCamelCase__ = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Dict , __UpperCamelCase : Dict=None , __UpperCamelCase : Any=None , **__UpperCamelCase : Tuple )->Union[str, Any]:
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
_UpperCAmelCase = kwargs.pop('''feature_extractor''' )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self : Tuple , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] )->Union[str, Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
_UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def lowercase__ ( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[int] )->List[Any]:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def lowercase__ ( self : List[Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[int] )->Optional[int]:
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def lowercase__ ( self : Any )->Dict:
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : Any )->List[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
@property
def lowercase__ ( self : Optional[Any] )->List[Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , )
return self.image_processor
| 360 |
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[int] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
from PIL import Image
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_SCREAMING_SNAKE_CASE : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(lowercase_ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
__A : Tuple = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
| 362 |
"""simple docstring"""
__A : Tuple = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Union[str, Any] = frozenset(["prompt", "negative_prompt"])
__A : str = frozenset([])
__A : List[str] = frozenset(["image"])
__A : Optional[Any] = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[int] = frozenset(["image"])
__A : Optional[int] = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Optional[Any] = frozenset(["prompt", "image", "negative_prompt"])
__A : str = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : List[str] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : List[Any] = frozenset(["image", "mask_image"])
__A : List[str] = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Tuple = frozenset(["example_image", "image", "mask_image"])
__A : Dict = frozenset(["class_labels"])
__A : str = frozenset(["class_labels"])
__A : str = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : str = frozenset(["batch_size"])
__A : Optional[int] = frozenset([])
__A : Any = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : List[str] = frozenset(["prompt", "negative_prompt"])
__A : Tuple = frozenset(["input_tokens"])
__A : Optional[int] = frozenset(["input_tokens"])
| 326 | 0 |
from collections.abc import Sequence
def lowercase ( _SCREAMING_SNAKE_CASE : Sequence[int] | None = None ):
'''simple docstring'''
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
_UpperCAmelCase = nums[0]
for i in range(1 , len(UpperCamelCase__ ) ):
_UpperCAmelCase = nums[i]
_UpperCAmelCase = max(UpperCamelCase__ , ans + num , UpperCamelCase__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__A : Optional[Any] = int(input("Enter number of elements : ").strip())
__A : Optional[int] = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 363 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : Optional[Any] = "cpu" , __UpperCamelCase : str = "openai/clip-vit-large-patch14" )->None:
_UpperCAmelCase = device
_UpperCAmelCase = CLIPTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
_UpperCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
_UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_UpperCAmelCase = torchvision.transforms.Resize(2_2_4 )
_UpperCAmelCase = torchvision.transforms.CenterCrop(2_2_4 )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.resize(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.center_crop(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.normalize(_SCREAMING_SNAKE_CASE )
return images
def __call__( self : Union[str, Any] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Optional[Any] )->int:
_UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.preprocess_img(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module):
"""simple docstring"""
def __init__( self : Dict , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : Any=0.0_1 , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : str=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[int]="image" , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : str=False , __UpperCamelCase : List[str]=False , )->None:
super().__init__()
_UpperCAmelCase = None
_UpperCAmelCase = device if device else get_device()
if vqgan:
_UpperCAmelCase = vqgan
else:
_UpperCAmelCase = load_vqgan(self.device , conf_path=_SCREAMING_SNAKE_CASE , ckpt_path=_SCREAMING_SNAKE_CASE )
self.vqgan.eval()
if clip:
_UpperCAmelCase = clip
else:
_UpperCAmelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
_UpperCAmelCase = ProcessorGradientFlow(device=self.device )
_UpperCAmelCase = iterations
_UpperCAmelCase = lr
_UpperCAmelCase = log
_UpperCAmelCase = make_grid
_UpperCAmelCase = return_val
_UpperCAmelCase = quantize
_UpperCAmelCase = self.vqgan.decoder.z_shape
def lowercase__ ( self : int , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=5 , __UpperCamelCase : Optional[int]=True )->List[str]:
_UpperCAmelCase = []
if output_path is None:
_UpperCAmelCase = "./animation.gif"
if input_path is None:
_UpperCAmelCase = self.save_path
_UpperCAmelCase = sorted(glob(input_path + '''/*''' ) )
if not len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(_SCREAMING_SNAKE_CASE ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
_UpperCAmelCase = total_duration / len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [frame_duration] * len(_SCREAMING_SNAKE_CASE )
if extend_frames:
_UpperCAmelCase = 1.5
_UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(_SCREAMING_SNAKE_CASE ) )
imageio.mimsave(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , duration=_SCREAMING_SNAKE_CASE )
print(F'gif saved to {output_path}' )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Any=None )->Any:
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
_UpperCAmelCase = preprocess(Image.open(_SCREAMING_SNAKE_CASE ) , target_image_size=2_5_6 ).to(self.device )
_UpperCAmelCase = preprocess_vqgan(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.vqgan.encode(_SCREAMING_SNAKE_CASE )
return z
def lowercase__ ( self : Tuple , __UpperCamelCase : str )->Tuple:
_UpperCAmelCase = self.latent.detach().requires_grad_()
_UpperCAmelCase = base_latent + transform_vector
if self.quantize:
_UpperCAmelCase = self.vqgan.quantize(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = trans_latent
return self.vqgan.decode(_SCREAMING_SNAKE_CASE )
def lowercase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple=None )->Any:
_UpperCAmelCase = self.clip_preprocessor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.clip(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
_UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] )->Optional[Any]:
_UpperCAmelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , _SCREAMING_SNAKE_CASE , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
_UpperCAmelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , _SCREAMING_SNAKE_CASE , weights=neg_prompts['''weights'''] )
else:
_UpperCAmelCase = torch.tensor([1] , device=self.device )
_UpperCAmelCase = -torch.log(_SCREAMING_SNAKE_CASE ) + torch.log(_SCREAMING_SNAKE_CASE )
return loss
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int )->str:
_UpperCAmelCase = torch.randn_like(self.latent , requires_grad=_SCREAMING_SNAKE_CASE , device=self.device )
_UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_UpperCAmelCase = self._add_vector(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = loop_post_process(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._get_CLIP_loss(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('''CLIP loss''' , _SCREAMING_SNAKE_CASE )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : int )->Tuple:
wandb.init(reinit=_SCREAMING_SNAKE_CASE , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
_UpperCAmelCase = Image.open(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''' , wandb.Image(_SCREAMING_SNAKE_CASE ) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : str )->Optional[int]:
if not prompts:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(_SCREAMING_SNAKE_CASE , (tuple, list) ):
_UpperCAmelCase = prompt[0]
_UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
_UpperCAmelCase = prompt.split(''':''' )
_UpperCAmelCase = float(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = prompt
_UpperCAmelCase = 1.0
processed_prompts.append(_SCREAMING_SNAKE_CASE )
weights.append(_SCREAMING_SNAKE_CASE )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_SCREAMING_SNAKE_CASE , device=self.device ),
}
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : str=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any=False , __UpperCamelCase : Tuple=True , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=None , )->Optional[Any]:
if image_path:
_UpperCAmelCase = self._get_latent(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert pos_prompts, "You must provide at least one positive prompt."
_UpperCAmelCase = self.process_prompts(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.process_prompts(_SCREAMING_SNAKE_CASE )
if save_final and save_path is None:
_UpperCAmelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = save_path + "_" + get_timestamp()
os.makedirs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = save_path
_UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = loop_post_process(_SCREAMING_SNAKE_CASE )
for iter, transformed_img in enumerate(self._optimize_CLIP(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
if show_intermediate:
show_pil(_SCREAMING_SNAKE_CASE )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(_SCREAMING_SNAKE_CASE )} )
if show_final:
show_pil(_SCREAMING_SNAKE_CASE )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
| 364 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Tuple = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A : List[str] = spec.loader.load_module()
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A : List[str] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 326 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__A : Optional[int] = logging.get_logger(__name__)
@dataclass
class _a ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self : List[Any] , **__UpperCamelCase : Dict )->str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_UpperCAmelCase = deprecated_arg[3:]
setattr(self , UpperCAmelCase_ , not kwargs.pop(UpperCAmelCase_ ) )
logger.warning(
F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
_UpperCAmelCase = kwargs.pop('''torchscript''' , self.torchscript )
_UpperCAmelCase = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
_UpperCAmelCase = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCAmelCase_ )
UpperCamelCase__ = field(default=snake_case__ , metadata={"""help""": """Trace the models using torchscript"""})
UpperCamelCase__ = field(default=snake_case__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""})
UpperCamelCase__ = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def lowercase__ ( self : List[Any] )->Tuple:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
_UpperCAmelCase = torch.device('''cpu''' )
_UpperCAmelCase = 0
elif is_torch_tpu_available():
_UpperCAmelCase = xm.xla_device()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
_UpperCAmelCase = torch.cuda.device_count()
return device, n_gpu
@property
def lowercase__ ( self : Optional[Any] )->int:
return is_torch_tpu_available() and self.tpu
@property
def lowercase__ ( self : Optional[Any] )->Dict:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def lowercase__ ( self : str )->Optional[int]:
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def lowercase__ ( self : Optional[int] )->List[Any]:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def lowercase__ ( self : Dict )->Any:
return self.n_gpu > 0
| 365 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
_UpperCAmelCase = gray_code_sequence_string(_SCREAMING_SNAKE_CASE )
#
# convert them to integers
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_UpperCAmelCase = int(sequence[i] , 2 )
return sequence
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
_UpperCAmelCase = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
_UpperCAmelCase = gray_code_sequence_string(bit_count - 1 )
_UpperCAmelCase = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase = '''0''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase = '''1''' + smaller_sequence[i]
sequence.append(_SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _a ( lowerCamelCase_):
"""simple docstring"""
def __init__( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] = None , __UpperCamelCase : Tuple = None , __UpperCamelCase : Dict = False , __UpperCamelCase : Any = False , __UpperCamelCase : Tuple = None , __UpperCamelCase : Dict = None , **__UpperCamelCase : int , )->Optional[int]:
super().__init__(
features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase = Generator(
cache_dir=lowerCAmelCase__ , features=lowerCAmelCase__ , generator=lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
def lowercase__ ( self : Tuple )->Any:
# Build iterable dataset
if self.streaming:
_UpperCAmelCase = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
_UpperCAmelCase = self.builder.as_dataset(
split='''train''' , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
| 366 |
"""simple docstring"""
import math
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 0 , _SCREAMING_SNAKE_CASE : int = 0 ):
'''simple docstring'''
_UpperCAmelCase = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i
_UpperCAmelCase = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_UpperCAmelCase = array[temp_index - 1]
temp_index -= 1
_UpperCAmelCase = temp_index_value
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ): # Max Heap
'''simple docstring'''
_UpperCAmelCase = index
_UpperCAmelCase = 2 * index + 1 # Left Node
_UpperCAmelCase = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_UpperCAmelCase = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_UpperCAmelCase = right_index
if largest != index:
_UpperCAmelCase , _UpperCAmelCase = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_UpperCAmelCase , _UpperCAmelCase = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = low
_UpperCAmelCase = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_UpperCAmelCase , _UpperCAmelCase = array[j], array[i]
i += 1
def lowercase ( _SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
_UpperCAmelCase = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
_UpperCAmelCase = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
_UpperCAmelCase = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : List[str] = input("Enter numbers separated by a comma : ").strip()
__A : Optional[Any] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _a ( UpperCamelCase__):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Any] = False , __UpperCamelCase : Optional[int] = False , __UpperCamelCase : Optional[Any] = None , **__UpperCamelCase : List[Any] , )->Dict:
_UpperCAmelCase = path_or_paths
_UpperCAmelCase = split if split or isinstance(__lowerCamelCase , __lowerCamelCase ) else '''train'''
_UpperCAmelCase = features
_UpperCAmelCase = cache_dir
_UpperCAmelCase = keep_in_memory
_UpperCAmelCase = streaming
_UpperCAmelCase = num_proc
_UpperCAmelCase = kwargs
@abstractmethod
def lowercase__ ( self : Union[str, Any] )->List[str]:
pass
class _a ( UpperCamelCase__):
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : List[Any] = None , __UpperCamelCase : Optional[int] = False , __UpperCamelCase : Optional[int] = False , __UpperCamelCase : Dict = None , **__UpperCamelCase : int , )->List[str]:
_UpperCAmelCase = features
_UpperCAmelCase = cache_dir
_UpperCAmelCase = keep_in_memory
_UpperCAmelCase = streaming
_UpperCAmelCase = num_proc
_UpperCAmelCase = kwargs
@abstractmethod
def lowercase__ ( self : str )->Optional[int]:
pass
| 367 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if not is_accelerate_available():
return method
_UpperCAmelCase = version.parse(accelerate.__version__ ).base_version
if version.parse(__lowerCamelCase ) < version.parse('''0.17.0''' ):
return method
def wrapper(self : int , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : str ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *__lowerCamelCase , **__lowerCamelCase )
return wrapper
| 368 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
__A : Tuple = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__A : List[Any] = ["a", "b", "c", "d", "e"]
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = start
# add current to visited
visited.append(_snake_case )
_UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_UpperCAmelCase = topological_sort(_snake_case , _snake_case , _snake_case )
# if all neighbors visited add current to sort
sort.append(_snake_case )
# if all vertices haven't been visited select a new one to visit
if len(_snake_case ) != len(_snake_case ):
for vertice in vertices:
if vertice not in visited:
_UpperCAmelCase = topological_sort(_snake_case , _snake_case , _snake_case )
# return sort
return sort
if __name__ == "__main__":
__A : str = topological_sort("a", [], [])
print(sort)
| 369 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class _a ( logging.LoggerAdapter):
"""simple docstring"""
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] )->int:
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCAmelCase = kwargs.pop('''main_process_only''' , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop('''in_order''' , __UpperCamelCase )
if self.isEnabledFor(__UpperCamelCase ):
if self._should_log(__UpperCamelCase ):
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
elif in_order:
_UpperCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase = self.process(__UpperCamelCase , __UpperCamelCase )
self.logger.log(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
state.wait_for_everyone()
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
_UpperCAmelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = logging.getLogger(_SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_SCREAMING_SNAKE_CASE , {} )
| 326 | 0 |
"""simple docstring"""
import re
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if len(re.findall('''[ATCG]''' , _SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : List[Any] = logging.get_logger(__name__)
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self : Tuple , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_5_6}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : int , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Any , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] )->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : str , )->List[Any]:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
from math import factorial
__A : Union[str, Any] = {str(digit): factorial(digit) for digit in range(10)}
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(_lowerCAmelCase ) )
def lowercase ( _SCREAMING_SNAKE_CASE : Tuple = 60 , _SCREAMING_SNAKE_CASE : Dict = 100_0000 ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
_UpperCAmelCase = 0
# the cached sizes of the previous chains
_UpperCAmelCase = {}
for start_chain_element in range(1 , _lowerCAmelCase ):
# The temporary set will contain the elements of the chain
_UpperCAmelCase = set()
_UpperCAmelCase = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
_UpperCAmelCase = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(_lowerCAmelCase )
chain_set_length += 1
_UpperCAmelCase = digit_factorial_sum(_lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
_UpperCAmelCase = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a ( __lowerCamelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = DanceDiffusionPipeline
UpperCamelCase__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
UpperCamelCase__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Optional[int] )->Optional[Any]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowercase , use_timestep_embedding=__lowercase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple=0 )->int:
if str(__lowercase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(__lowercase )
else:
_UpperCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**__lowercase )
_UpperCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
_UpperCAmelCase = self.get_dummy_inputs(__lowercase )
_UpperCAmelCase = pipe(**__lowercase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase__ ( self : List[Any] )->Tuple:
return super().test_save_load_local()
@skip_mps
def lowercase__ ( self : List[Any] )->Union[str, Any]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowercase__ ( self : Union[str, Any] )->Union[str, Any]:
return super().test_save_load_optional_components()
@skip_mps
def lowercase__ ( self : Optional[int] )->Optional[Any]:
return super().test_attention_slicing_forward_pass()
def lowercase__ ( self : Union[str, Any] )->Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Optional[int] )->str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : str )->Union[str, Any]:
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=__lowercase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_9_6 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Optional[int] )->Optional[int]:
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=__lowercase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_9_6 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
UpperCamelCase__ = None
__A : Union[str, Any] = namedtuple("CoinsDistribResult", "moves excess")
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_SCREAMING_SNAKE_CASE ) != count_coins(_SCREAMING_SNAKE_CASE ):
raise ValueError('''The nodes number should be same as the number of coins''' )
# Main calculation
def get_distrib(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.left )
_UpperCAmelCase , _UpperCAmelCase = get_distrib(node.right )
_UpperCAmelCase = 1 - left_distrib_excess
_UpperCAmelCase = 1 - right_distrib_excess
_UpperCAmelCase = (
left_distrib_moves
+ right_distrib_moves
+ abs(_SCREAMING_SNAKE_CASE )
+ abs(_SCREAMING_SNAKE_CASE )
)
_UpperCAmelCase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return get_distrib(_SCREAMING_SNAKE_CASE )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
_UpperCAmelCase = parser.add_subparsers(help='''transformers-cli command helpers''' )
# Register commands
ConvertCommand.register_subcommand(_UpperCAmelCase )
DownloadCommand.register_subcommand(_UpperCAmelCase )
EnvironmentCommand.register_subcommand(_UpperCAmelCase )
RunCommand.register_subcommand(_UpperCAmelCase )
ServeCommand.register_subcommand(_UpperCAmelCase )
UserCommands.register_subcommand(_UpperCAmelCase )
AddNewModelCommand.register_subcommand(_UpperCAmelCase )
AddNewModelLikeCommand.register_subcommand(_UpperCAmelCase )
LfsCommands.register_subcommand(_UpperCAmelCase )
PTtoTFCommand.register_subcommand(_UpperCAmelCase )
# Let's go
_UpperCAmelCase = parser.parse_args()
if not hasattr(_UpperCAmelCase , '''func''' ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase = args.func(_UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 351 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase__ = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str=False )->Optional[Any]:
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=1_3 , __UpperCamelCase : Any=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=9_9 , __UpperCamelCase : Optional[int]=3_2 , __UpperCamelCase : Union[str, Any]=3_2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Optional[Any]=3_7 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[Any]=5_1_2 , __UpperCamelCase : Any=1_6 , __UpperCamelCase : Dict=2 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[str]=None , )->Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertModel(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->Tuple:
_UpperCAmelCase = TFMobileBertForMaskedLM(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict )->List[Any]:
_UpperCAmelCase = TFMobileBertForPreTraining(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] )->Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] )->List[str]:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=__UpperCamelCase )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any )->Dict:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] )->List[Any]:
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__UpperCamelCase )
_UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : List[str] )->Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def lowercase__ ( self : List[Any] )->List[str]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def lowercase__ ( self : Dict )->Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase__ ( self : Tuple )->List[str]:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 326 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def lowercase__ ( *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple )->Optional[Any]:
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
@require_torch
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
] , )
@require_tf
def lowercase__ ( self : List[Any] )->List[Any]:
_UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': 0.3_3_3, '''label''': ANY(__SCREAMING_SNAKE_CASE )},
],
] , )
@slow
@require_torch
def lowercase__ ( self : Optional[int] )->Union[str, Any]:
_UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def lowercase__ ( self : Dict )->List[Any]:
_UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase = image_classifier(__SCREAMING_SNAKE_CASE , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 352 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _a ( snake_case_):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : int = True , __UpperCamelCase : Dict = None , __UpperCamelCase : Any = False , __UpperCamelCase : Dict = None , __UpperCamelCase : List[str] = True , __UpperCamelCase : int = "arrow" , **__UpperCamelCase : str , )->Optional[Any]:
super().__init__(
split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = load_from_cache_file
_UpperCAmelCase = file_format
_UpperCAmelCase = Spark(
df=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , working_dir=__UpperCamelCase , **__UpperCamelCase , )
def lowercase__ ( self : int )->Dict:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_UpperCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCamelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 353 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__A : Tuple = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Path , __UpperCamelCase : Union[str, None] = None , __UpperCamelCase : Union[List[str], None] = None , __UpperCamelCase : Union[str, List[str], None] = None , __UpperCamelCase : bool = True , )->Tuple:
_UpperCAmelCase = [file for file in os.listdir(__UpperCamelCase ) if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) )]
if identifier is not None:
_UpperCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
for n_ in n_identifier:
_UpperCAmelCase = [file for file in files if n_ not in file]
else:
_UpperCAmelCase = [file for file in files if n_identifier not in file]
_UpperCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_UpperCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __UpperCamelCase )
if only_modules:
_UpperCAmelCase = file.split('''.''' )[0]
try:
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = doctest.DocTestSuite(__UpperCamelCase )
_UpperCAmelCase = unittest.TextTestRunner().run(__UpperCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
_UpperCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : str )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''modeling'''
_UpperCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase , ignore_files=__UpperCamelCase )
def lowercase__ ( self : List[Any] )->int:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''tokenization'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : str )->Any:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = '''configuration'''
self.analyze_directory(__UpperCamelCase , identifier=__UpperCamelCase )
def lowercase__ ( self : int )->Optional[Any]:
_UpperCAmelCase = Path('''src/transformers''' )
_UpperCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__UpperCamelCase , n_identifier=__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = Path('''docs/source''' )
_UpperCAmelCase = ['''favicon.ico''']
self.analyze_directory(__UpperCamelCase , ignore_files=__UpperCamelCase , only_modules=__UpperCamelCase )
| 326 | 0 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
while a != 0:
_UpperCAmelCase , _UpperCAmelCase = b % a, a
return b
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
if gcd(_lowerCAmelCase , _lowerCAmelCase ) != 1:
_UpperCAmelCase = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1, 0, a
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 1, m
while va != 0:
_UpperCAmelCase = ua // va
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 354 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 42
UpperCamelCase__ = None
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=0.999 , _SCREAMING_SNAKE_CASE : Any="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_UpperCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = 1
@register_to_config
def __init__( self : List[Any] , __UpperCamelCase : int = 1_0_0_0 , __UpperCamelCase : float = 0.0_0_0_1 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : str = "linear" , __UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = 0 , __UpperCamelCase : str = "epsilon" , __UpperCamelCase : float = 1.0 , **__UpperCamelCase : Optional[int] , )->Dict:
if kwargs.get('''set_alpha_to_one''' , __UpperCamelCase ) is not None:
_UpperCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
_UpperCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(__UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(__UpperCamelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_UpperCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_UpperCAmelCase = 1.0
# setable values
_UpperCAmelCase = None
_UpperCAmelCase = torch.from_numpy(np.arange(0 , __UpperCamelCase ).copy().astype(np.intaa ) )
def lowercase__ ( self : str , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : Optional[int] = None )->torch.FloatTensor:
return sample
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Union[str, torch.device] = None )->Any:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase = (np.arange(0 , __UpperCamelCase ) * step_ratio).round().copy().astype(np.intaa )
_UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase )
self.timesteps += self.config.steps_offset
def lowercase__ ( self : Any , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor , __UpperCamelCase : float = 0.0 , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : bool = True , )->Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
_UpperCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_UpperCAmelCase = self.alphas_cumprod[timestep]
_UpperCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_UpperCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_UpperCAmelCase = model_output
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
_UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_UpperCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def __len__( self : Any )->str:
return self.config.num_train_timesteps
| 326 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.