code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    """Placeholder object that raises an ImportError unless the `onnx` backend is installed."""

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
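The metaclass is what makes this pattern work: any attribute access on the class re-triggers the backend check, so the ImportError surfaces at use time instead of import time. A rough sketch of that mechanism, with simplified stand-ins for the package's real `DummyObject` and `requires_backends` (assumption-level illustration, not the exact upstream implementation):

import importlib.util


class DummyObject(type):
    # Sketch: any non-underscore attribute access on the class re-runs the backend check.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    # Sketch: raise if any required backend is not importable.
    missing = [backend for backend in backends if importlib.util.find_spec(backend) is None]
    if missing:
        name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")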
| 284 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
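Hand-checked walk-through of the tree evaluated by main() (height = 3, maximizer moves first):

# leaves (depth 3):      [90, 23, 6, 33, 21, 65, 123, 34423]
# depth 2, max of pairs: [90, 33, 65, 34423]
# depth 1, min of pairs: [33, 65]
# root, max:             65   -> prints "Optimal value : 65"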
| 284 | 1 |
import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests

open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 369 |
from __future__ import annotations


def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary-search v[l..r] for the smallest index whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence of length 1
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
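A hand-checked example of what the function computes, added for reference:

# longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
# one witness subsequence: [2, 3, 7, 8, 10, 13]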
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 222 | 0 |
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
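Hand-checked examples:

# is_isogram("Uncopyrightable") -> True   (fifteen distinct letters)
# is_isogram("allowance")       -> False  ("a" and "l" repeat)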
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 38 |
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
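For intuition: applied to the all-zeros initial state, the QFT produces a uniform superposition, so the measured counts should come out roughly even across all 2**n basis states. A rough expectation for the default run (shot noise will move the numbers around):

# quantum_fourier_transform(3)
# -> counts close to {'000': 1250, '001': 1250, ..., '111': 1250} over 10000 shots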
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 38 | 1 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 368 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV2 model."""

    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
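A minimal usage sketch (assumes a transformers install with MobileNetV2 support; the model class name follows the library's usual config/model pairing):

# from transformers import MobileNetV2Config, MobileNetV2Model
# config = MobileNetV2Config(depth_multiplier=0.75)
# model = MobileNetV2Model(config)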
| 25 | 0 |
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page replacement cache that evicts the least recently used key first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store; a falsy `n` means an effectively unbounded cache."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x: insert it if new (evicting the LRU key when full), else move it to the front."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
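Worked eviction order for the demo below (store shown most recently used first):

# refer A, 2, 3   -> [3, 2, 'A']
# refer A (hit)   -> ['A', 3, 2]
# refer 4         -> [4, 'A', 3, 2]    (now at capacity 4)
# refer 5         -> [5, 4, 'A', 3]    (evicts least recently used key 2)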
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 125 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    # NOTE: the timing-dict keys below are reconstructed; the originals were lost in extraction.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 125 | 1 |
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 355 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Return the Burrows-Wheeler transform of s and the index of the original rotation."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse a Burrows-Wheeler transform, given the index of the original rotation."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
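Hand-checked round trip, added for reference:

# bwt_transform("^BANANA") -> {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
# reverse_bwt("BNN^AAA", 6) -> "^BANANA"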
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 156 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = False, False, False
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = True
__UpperCamelCase : Optional[str] = None
# Automatically constructed
__UpperCamelCase : ClassVar[str] = "dict"
__UpperCamelCase : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
__UpperCamelCase : str = field(default='''Audio''' , init=snake_case_ , repr=snake_case_ )
def __call__(self ) -> Optional[Any]:
"""simple docstring"""
return self.pa_type
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install \'soundfile\'.""" ) from err
if isinstance(_A , _A ):
return {"bytes": None, "path": value}
elif isinstance(_A , _A ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
SCREAMING_SNAKE_CASE__ : Tuple = BytesIO()
sf.write(_A , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a \'sampling_rate\' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
SCREAMING_SNAKE_CASE__ : List[Any] = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_27_67
SCREAMING_SNAKE_CASE__ : List[str] = BytesIO(bytes() )
sf.write(_A , _A , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
SCREAMING_SNAKE_CASE__ : Any = (value['path'], BytesIO(value["""bytes"""] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install \'librosa\' and \'soundfile\'.""" ) from err
SCREAMING_SNAKE_CASE__ : str = xsplitext(_A )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
SCREAMING_SNAKE_CASE__ : str = token_per_repo_id or {}
SCREAMING_SNAKE_CASE__ : str = path.split("""::""" )[-1]
try:
SCREAMING_SNAKE_CASE__ : Tuple = string_to_dict(_A , config.HUB_DATASETS_URL )['repo_id']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
SCREAMING_SNAKE_CASE__ : Any = None
with xopen(_A , """rb""" , use_auth_token=_A ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sf.read(_A )
else:
SCREAMING_SNAKE_CASE__ : Tuple = sf.read(_A )
SCREAMING_SNAKE_CASE__ : Any = array.T
if self.mono:
SCREAMING_SNAKE_CASE__ : List[str] = librosa.to_mono(_A )
if self.sampling_rate and self.sampling_rate != sampling_rate:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = librosa.resample(_A , orig_sr=_A , target_sr=self.sampling_rate )
SCREAMING_SNAKE_CASE__ : str = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __magic_name__ (self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE__ : str = pa.array([None] * len(_A ) , type=pa.binary() )
SCREAMING_SNAKE_CASE__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.array([None] * len(_A ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : str = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
SCREAMING_SNAKE_CASE__ : int = pa.array([Audio().encode_example(_A ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
SCREAMING_SNAKE_CASE__ : Dict = storage.field("""bytes""" )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = pa.array([None] * len(_A ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = storage.field("""path""" )
else:
SCREAMING_SNAKE_CASE__ : str = pa.array([None] * len(_A ) , type=pa.string() )
SCREAMING_SNAKE_CASE__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(_A , self.pa_type )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(SCREAMING_SNAKE_CASE__ ):
with xopen(_A , """rb""" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = f.read()
return bytes_
SCREAMING_SNAKE_CASE__ : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE__ : Dict = pa.array(
[os.path.basename(_A ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_A , self.pa_type )
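A minimal usage sketch (the file name is illustrative; decoding needs librosa and soundfile installed):

# from datasets import Audio, Dataset
# ds = Dataset.from_dict({"audio": ["example.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
# ds[0]["audio"]  # -> {"path": ..., "array": <np.ndarray>, "sampling_rate": 16000}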
| 25 |
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` appears as the trailing digits of its own square."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
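Hand-checked examples:

# is_automorphic_number(5)  -> True   (5*5 = 25 ends in 5)
# is_automorphic_number(76) -> True   (76*76 = 5776 ends in 76)
# is_automorphic_number(7)  -> False  (7*7 = 49 does not end in 7)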
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class snake_case_:
def __init__( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=False , UpperCamelCase_ : Dict=1_0 , UpperCamelCase_ : str=3 , UpperCamelCase_ : str=3_2 * 4 , UpperCamelCase_ : Dict=3_2 * 6 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : str=3_2 , ):
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : Dict = batch_size
lowerCAmelCase : str = is_training
lowerCAmelCase : Optional[int] = use_auxiliary_loss
lowerCAmelCase : str = num_queries
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : Dict = min_size
lowerCAmelCase : List[str] = max_size
lowerCAmelCase : List[str] = num_labels
lowerCAmelCase : Tuple = mask_feature_size
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
lowerCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
).float()
lowerCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase__ ( self : Tuple ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : str ):
lowerCAmelCase : List[str] = output.encoder_hidden_states
lowerCAmelCase : Union[str, Any] = output.pixel_decoder_hidden_states
lowerCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False ):
with torch.no_grad():
lowerCAmelCase : List[str] = MaskFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Any = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : Any = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(UpperCamelCase_ : Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
lowerCAmelCase : List[Any] = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
lowerCAmelCase : Any = model(
pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__UpperCamelCase = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = MaskFormerModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def lowerCamelCase__ ( self : Tuple ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def lowerCamelCase__ ( self : str ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def lowerCamelCase__ ( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCamelCase__ ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ )
lowerCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : List[str] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowerCAmelCase : List[str] = MaskFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase : List[str] = (self.model_tester.min_size,) * 2
lowerCAmelCase : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=UpperCamelCase_ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=UpperCamelCase_ ).long(),
}
lowerCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ )
lowerCAmelCase : str = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase : Tuple = self.all_model_classes[1]
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
loss.backward()
def lowerCamelCase__ ( self : List[str] ):
# only MaskFormerForInstanceSegmentation has the loss
lowerCAmelCase : Tuple = self.all_model_classes[1]
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCAmelCase : List[Any] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
lowerCAmelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowerCAmelCase : Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
snake_case__ : Union[str, Any] = 1e-4
def _snake_case ( ):
lowerCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : int ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[int] = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(UpperCamelCase_ )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : int = prepare_img()
lowerCAmelCase : Dict = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
lowerCAmelCase : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ )
lowerCAmelCase : int = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(UpperCamelCase_ )
.eval()
)
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : Any = prepare_img()
lowerCAmelCase : List[Any] = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
lowerCAmelCase : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase : str = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase : List[Any] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
lowerCAmelCase : Union[str, Any] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCAmelCase : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase : Dict = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(UpperCamelCase_ )
.eval()
)
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : List[str] = prepare_img()
lowerCAmelCase : int = image_processor(UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
lowerCAmelCase : int = model(**UpperCamelCase_ )
# masks_queries_logits
lowerCAmelCase : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowerCAmelCase : Optional[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowerCAmelCase : Dict = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
lowerCAmelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase : str = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(UpperCamelCase_ )
.eval()
)
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowerCAmelCase : Union[str, Any] = inputs['''pixel_values'''].to(UpperCamelCase_ )
lowerCAmelCase : Any = [el.to(UpperCamelCase_ ) for el in inputs['''mask_labels''']]
lowerCAmelCase : Union[str, Any] = [el.to(UpperCamelCase_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowerCAmelCase : Tuple = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 314 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
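A minimal usage sketch (the checkpoint name is illustrative, not taken from this file):

# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]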
| 314 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowercase_ = datasets.logging.get_logger(__name__)
# Imports restored so the module is self-contained; `coval` is installed from
# git+https://github.com/ns-moosavi/coval.git
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"

_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"

_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n    ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n    ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n    ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n    ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n    ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
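
# Worked example of the CoNLL average above (illustrative numbers, not from the source): with
# MUC F1 = 0.70, B-cubed F1 = 0.60 and CEAFe F1 = 0.65, conll_subparts_num reaches 3, so
# conll = ((0.70 + 0.60 + 0.65) / 3) * 100 = 65.0, reported under the "conll_score" key.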
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 45 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 222 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
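# Hedged usage sketch (the script name and flag values below are illustrative, not taken from
# this file):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1
# synchronizes model parameters across workers every 8 optimizer steps instead of on every step.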
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 279 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
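
# Example behavior (illustrative): with RUN_SLOW=yes in the environment,
# parse_flag_from_env("RUN_SLOW") returns 1 (truthy, via strtobool); when the variable is unset,
# the `default` argument is returned unchanged.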
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; run it by setting the RUN_SLOW environment variable to a truthy value."
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run on CPU only; skipped when a GPU is available."
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    "Decorator marking a test that requires CUDA; skipped when no GPU is available."
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """
    Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an
    installed torch version is less than the required one.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
    data at the start of a test, and then destroys it at the end of the TestCase.

    The temporary directory location is stored in `self.tmpdir`.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that resets the accelerator state at the end of every test, so that state is not
    silently shared between tests through the `AcceleratorState` singleton.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns the decoded stdout; raises
    `SubprocessCallException` with the captured output if the command fails.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 279 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BigBird tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
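
# Hedged usage sketch (token ids are illustrative, not real vocabulary entries): BigBird wraps a
# single sequence as `[CLS] A [SEP]` and a pair as `[CLS] A [SEP] B [SEP]`, so
# tokenizer.build_inputs_with_special_tokens([5, 6], [7]) returns
# [cls_token_id, 5, 6, sep_token_id, 7, sep_token_id].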
| 5 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
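
# Worked example of the 6k +/- 1 loop above: for number = 97, sqrt(97) ~= 9.85, so the loop only
# tries i = 5, checking 97 % 5 and 97 % 7; both are non-zero, hence 97 is prime.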
class PrimalityTest(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # is_prime asserts its input is a non-negative int, so a negative raises AssertionError.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 25 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# The original class name was mangled. These defaults (shortest edge 256 -> center crop 224,
# ImageNet-standard mean/std, semantic-segmentation post-processing) match MobileNetV2's image
# processor, so that name is restored here as a best guess.
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
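
# Hedged usage sketch: with the defaults above, `processor.preprocess(image, return_tensors="pt")`
# resizes the shortest edge to 256, center-crops to 224x224, rescales by 1/255 and normalizes with
# the ImageNet-standard mean/std, returning a BatchFeature whose "pixel_values" tensor has shape
# (batch, 3, 224, 224).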
| 359 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
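
# Hedged usage sketch (hypothetical test, not part of this fixture module): pytest injects the
# fixtures above by parameter name, so a test module could do
#
#     import os
#
#     def test_script_dir_is_created(dataset_loading_script_dir):
#         assert os.path.isdir(dataset_loading_script_dir)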
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
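
# Note on the lazy-import pattern above: binding sys.modules[__name__] to a _LazyModule means the
# torch-backed modeling code is only imported when an attribute such as
# `UperNetForSemanticSegmentation` is first accessed, keeping `import transformers` cheap.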
| 270 |
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a 32 bit integer using Brian Kernighan's way.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
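
# Worked example of the `number &= number - 1` trick above: for 13 (0b1101) the loop runs three
# times -- 13 & 12 = 12 (0b1100), 12 & 11 = 8 (0b1000), 8 & 7 = 0 -- so the count is 3.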
if __name__ == "__main__":
import doctest
doctest.testmod()
| 156 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Capture the cache dir in a local so that create_cache_and_write_probe does not reference
        # self, which would trigger a pickling error by dragging the SparkContext along.
        cache_dir = self._cache_dir

        # Returns the path of the created probe file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark
        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ) -> None:
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f'Renaming {total_shards} shards.' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , f'{global_shard_id:05d}' ).replace("NNNNN" , f'{total_shards:05d}' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , f'{shard_id:05d}' ).replace("TTTTT" , f'{task_id:05d}' ) , fpath.replace(SUFFIX , "" ) , )
    def _get_examples_iterable_for_split( self , split_generator , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
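# Note (illustrative, not part of the original module): this builder is what
# backs `datasets.Dataset.from_spark(df)`, which materializes a Spark DataFrame
# as Arrow shards via the Spark class above and loads it as a HF dataset.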
| 362 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation : str ) -> int:
    """Evaluate a fully parenthesized arithmetic expression with two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 92 | 0 |
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet , scheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ) -> torch.Tensor:
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        unet_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(unet_output , timestep , sample ).prev_sample
        # The result is always a tensor of ones, regardless of the model output.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
return result
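# Minimal usage sketch (illustrative; assumes small diffusers components and is
# not part of the original file):
#
#     from diffusers import DDPMScheduler, UNet2DModel
#     unet = UNet2DModel(sample_size=8, in_channels=3, out_channels=3)
#     pipe = CustomLocalPipeline(unet=unet, scheduler=DDPMScheduler())
#     ones = pipe()  # always a tensor of ones with the UNet's sample shape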
| 314 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig ):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50_256 , eos_token_id=50_256 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
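# Quick sanity check (illustrative, not part of the original file):
#
#     config = TrajectoryTransformerConfig(n_layer=2, n_head=2)
#     assert config.num_hidden_layers == config.n_layer  # attribute_map alias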
| 314 | 1 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
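    # Worked example (illustrative, not part of the original module): 60 days
    # of simple interest on a 10_000 principal at a 0.05% daily rate is
    # 10_000 * 0.0005 * 60 = 300.0.
    print(f"simple_interest(10_000, 0.0005, 60) = {simple_interest(10_000, 0.0005, 60)}" )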
| 86 |
"""simple docstring"""
class DisjointSet:
    def __init__(self , set_counts : list ):
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge(self , src : int , dst : int ) -> bool:
        """Merge two sets together using union by rank; return True on success."""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent(self , disj_set : int ) -> int:
        """Find the parent of a given set (with path compression)."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
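if __name__ == "__main__":
    # Illustrative usage (not part of the original module): three sets of
    # sizes 1, 2 and 3; merging sets 0 and 2 yields a set of size 4.
    disjoint_set = DisjointSet([1, 2, 3] )
    disjoint_set.merge(0 , 2 )
    print(disjoint_set.get_parent(0 ) , disjoint_set.max_set )  # -> 2 4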
| 86 | 1 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict ) -> None:
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict ) -> dict:
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f'''{key} -> {new_key}''' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
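# Example of the mapping above (illustrative, not part of the conversion script):
#   "decoder.blocks.0.mlp.0.weight"      -> "decoder.layers.0.fc1.weight"
#   "encoder.blocks.3.attn.query.weight" -> "encoder.layers.3.self_attn.q_proj.weight"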
def make_linear_from_emb(emb ) -> nn.Linear:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url , root="." ) -> bytes:
    """simple docstring"""
    # NOTE: the default download root is an assumption; the original script may
    # have passed it explicitly.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        # _download returns the raw checkpoint bytes, so deserialize them here.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location='''cpu''' )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    def forward(self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase(unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('''bert-base-cased''', {}),
('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow(self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''tf''' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch(self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , '''pt''' , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model(self ):
        from transformers import BertModel
        vocab = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , '''pt''' , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf(self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''tf''' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
    def test_quantize_pytorch(self ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , '''pt''' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
    def _test_export(self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('''model.onnx''' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework , model , path , opset , tokenizer , **model_kwargs )
            return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self ):
        from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''pt''' )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self ):
        from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        tokenizer = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(model , tokenizer , '''tf''' )
    def _test_infer_dynamic_axis(self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
    def test_ensure_valid_input(self ):
        input_names = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        tokens = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] , '''input_ids''' )
    def test_generate_identified_filename(self ):
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 279 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase ):
    def test_map_nested(self ):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {'a': 1, 'b': 2}
        sn6 = {'a': [1, 2], 'b': [3, 4]}
        sn7 = {'a': {'1': 1}, 'b': 2}
        sn8 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {'a': 2, 'b': 3}
        expected_map_nested_sn6 = {'a': [2, 3], 'b': [4, 5]}
        expected_map_nested_sn7 = {'a': {'1': 2}, 'b': 3}
        expected_map_nested_sn8 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.assertEqual(map_nested(add_one , sn1 ) , expected_map_nested_sn1 )
        self.assertEqual(map_nested(add_one , sn2 ) , expected_map_nested_sn2 )
        self.assertEqual(map_nested(add_one , sn3 ) , expected_map_nested_sn3 )
        self.assertEqual(map_nested(add_one , sn4 ) , expected_map_nested_sn4 )
        self.assertEqual(map_nested(add_one , sn5 ) , expected_map_nested_sn5 )
        self.assertEqual(map_nested(add_one , sn6 ) , expected_map_nested_sn6 )
        self.assertEqual(map_nested(add_one , sn7 ) , expected_map_nested_sn7 )
        self.assertEqual(map_nested(add_one , sn8 ) , expected_map_nested_sn8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , sn1 , num_proc=num_proc ) , expected_map_nested_sn1 )
        self.assertEqual(map_nested(add_one , sn2 , num_proc=num_proc ) , expected_map_nested_sn2 )
        self.assertEqual(map_nested(add_one , sn3 , num_proc=num_proc ) , expected_map_nested_sn3 )
        self.assertEqual(map_nested(add_one , sn4 , num_proc=num_proc ) , expected_map_nested_sn4 )
        self.assertEqual(map_nested(add_one , sn5 , num_proc=num_proc ) , expected_map_nested_sn5 )
        self.assertEqual(map_nested(add_one , sn6 , num_proc=num_proc ) , expected_map_nested_sn6 )
        self.assertEqual(map_nested(add_one , sn7 , num_proc=num_proc ) , expected_map_nested_sn7 )
        self.assertEqual(map_nested(add_one , sn8 , num_proc=num_proc ) , expected_map_nested_sn8 )
        sn = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
        expected_map_nested_sn_sum = {'a': 2, 'b': 0, 'c': 2}
        expected_map_nested_sn_int = {
            'a': np.eye(2 ).astype(int ),
            'b': np.zeros(3 ).astype(int ),
            'c': np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ):  # can't pickle a local lambda
            map_nested(lambda x : x + 1 , sn , num_proc=num_proc )
    def test_zip_dict(self ):
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 3, 'b': 4}
        d3 = {'a': 5, 'b': 6}
        expected_zip_dict_result = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment(self ):
        class Foo:
            my_attr = 'bar'
        foo = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(foo , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {f'{i}': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase ):
    @require_tf
    def test_tensorflow(self ):
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    @require_torch
    def test_torch(self ):
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy(self ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1 , y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input ) == expected_output
    input = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y='foo' )] )
def _split_text(text ):
    return text.split()
def _2seconds_generator_of_2items_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _2seconds_generator_of_2items_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
    assert out.count('a' ) == 2
    assert out.count('b' ) == 2
    assert len(out ) == 4
| 80 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit : int = 1_500_000 ) -> int:
    # Euclid's formula: for coprime m > n (not both odd), a = m^2 - n^2,
    # b = 2mn, c = m^2 + n^2 is a primitive triple with perimeter 2m(m + n).
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'{solution() = }')
| 80 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
@pytest.fixture(scope='session' )
def zstd_path(tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def test_extracted_datasets_path(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(__file__ ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    output_file = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_http_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_ftp_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def test_fsspec_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 119 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file(self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained(self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder(self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configpath )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels(self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params(self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init(self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )
    def run_common_tests(self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 344 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "roberta"
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs(self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 371 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ = """codegen"""
lowerCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast ):
    '''simple docstring'''
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past)
        if not getattr(self._config , 'pad_token_id' , None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self ):
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers(self ) -> int:
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads(self ) -> int:
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self ) -> int:
        '''simple docstring'''
        return 13
| 48 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_ ):
    """simple docstring"""
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        """simple docstring"""
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
    def run(self ):
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict(d ):
        """simple docstring"""
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 75 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( snake_case__ ):
def __init__( self , *_A , _A=None , _A=None , **_A ):
"""simple docstring"""
super().__init__(*_A , **_A )
__lowerCAmelCase = eval_examples
__lowerCAmelCase = post_process_function
def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
__lowerCAmelCase = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
__lowerCAmelCase = gen_kwargs
__lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowerCAmelCase = self.get_eval_dataloader(_A )
__lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowerCAmelCase = self.post_process_function(_A , _A , _A )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
else:
__lowerCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
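A condensed construction sketch; every name below (model, args, datasets, post-processing and metric callables) is hypothetical and would come from the surrounding training script:

trainer = QuestionAnsweringSeq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    tokenizer=tokenizer,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate(max_length=64, num_beams=4)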
| 92 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
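A minimal sketch of how the two classes above fit together; the printed values just echo the defaults defined here:

config = DeiTConfig()
onnx_config = DeiTOnnxConfig(config)
print(onnx_config.inputs)               # dynamic axes for pixel_values
print(onnx_config.atol_for_validation)  # 1e-4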
| 152 |
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 152 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __lowerCamelCase ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase):
A_ : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
import datasets
__lowerCAmelCase : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__lowerCAmelCase : int = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__lowerCAmelCase : Union[str, Any] = object_detector(_SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__lowerCAmelCase : List[str] = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
__lowerCAmelCase : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'facebook/detr-resnet-50'
__lowerCAmelCase : List[str] = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCAmelCase : Any = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'facebook/detr-resnet-50'
__lowerCAmelCase : Any = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCAmelCase : List[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 0.9985
__lowerCAmelCase : List[str] = 'facebook/detr-resnet-50'
__lowerCAmelCase : Tuple = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'Narsil/layoutlmv3-finetuned-funsd'
__lowerCAmelCase : Optional[Any] = 0.9993
__lowerCAmelCase : Tuple = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , ) | 86 |
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
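A quick sanity check of the lookups above, using a made-up sorted list:

example = [0, 5, 7, 10, 15]
assert binary_search(example, 15) == 4
assert binary_search(example, 6) is None
assert binary_search_std_lib(example, 7) == 2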
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak the fairseq model's weights into our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 217 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    # Create a simple DataLoader to use during the test cases
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    # A helper function for verifying the batch sizes coming from a prepared dataloader in each process
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
| 80 |
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
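A quick worked example of the token-level F1 above, using made-up strings rather than SQuAD data:

# gold "a cat sat"         -> tokens after normalization: ["cat", "sat"]
# pred "the cat sat on it" -> tokens after normalization: ["cat", "sat", "on", "it"]
# overlap = 2, precision = 2/4, recall = 2/2, so F1 = 2 * 0.5 * 1.0 / 1.5 = 0.667
print(round(compute_f1("a cat sat", "the cat sat on it"), 3))  # 0.667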
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 80 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
UpperCamelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCamelCase_ = tokenizer("Hello there" , return_tensors="np" ).input_ids
UpperCamelCase_ = tokenizer("Hi I am" , return_tensors="np" ).input_ids
UpperCamelCase_ = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCamelCase_ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
UpperCamelCase_ = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1] ) ).mean()
UpperCamelCase_ = -(labels.shape[-1] * loss.item())
UpperCamelCase_ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 369 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 328 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
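A minimal usage sketch, under the assumption that the default checkpoint can be downloaded; the dialogue is made up:

summarizer = TextSummarizationTool()
print(summarizer("Jeff: Can I train a model on your GPU? Phil: Sure, go ahead, it is free tonight."))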
| 19 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 197 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
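A condensed sampling-loop sketch. `model` stands in for a hypothetical denoiser call (the real diffusers pipeline wraps a UNet and also runs `step_correct` whenever `sigma_prev != 0`); shapes and the step count are illustrative:

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = model(sample_hat, sigma_hat)  # hypothetical denoiser
    sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample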
| 197 | 1 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        # move the randomly chosen pivot to the end before partitioning
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
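A tiny sanity check for the pair of functions above, using a made-up list (the comparison count varies with the random pivots):

demo = [3, 1, 2]
_in_place_quick_sort(demo, 0, len(demo) - 1)
print(demo)  # [1, 2, 3]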
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 152 |
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 152 | 1 |
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
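
# These are integration tests; a sketch of how they are typically run from a
# transformers checkout (requires TensorFlow to be installed):
#   python -m pytest tests/benchmark/test_benchmark_tf.py -v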
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
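
# With the lazy module in place, importing from this package only triggers the
# heavy torch-backed imports on first attribute access — a sketch of the
# intended usage, assuming this file is transformers/models/informer/__init__.py:
#
#     from transformers import InformerConfig, InformerModel
#     model = InformerModel(InformerConfig())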
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: List[str] = 0
while number > 0:
__lowerCAmelCase: Any = number % 1_0
sum_of_digits += last_digit
__lowerCAmelCase: List[Any] = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def a__ ( __SCREAMING_SNAKE_CASE = 1_0_0 ) -> int:
__lowerCAmelCase: Tuple = factorial(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
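
# Quick sanity check: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#     >>> split_and_add(factorial(10))
#     27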
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Perceiver model."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified

        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
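
# Minimal sketch: the defaults above give a language-Perceiver-sized config.
#     >>> config = PerceiverConfig()
#     >>> (config.num_latents, config.d_latents)
#     (256, 1280)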
"""Convert X-CLIP checkpoints from the original repository."""

import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
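

# e.g. rename_key("visual.conv1.weight") -> "vision_model.embeddings.patch_embedding.weight"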
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ : Optional[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
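
# Example invocation (a sketch; the output directory is a placeholder):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32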
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
        fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using reference calculates the BLEU/ROUGE scores.

    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
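    # Usage for summarization (a sketch; model name and dataset paths are placeholders):
    # python run_eval.py sshleifer/distilbart-cnn-12-6 $DATA_DIR/test.source $save_dir/test_preds.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization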
run_generate(verbose=True)
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
# flake8: noqa
# Lint as: python3
"""Util import."""

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase = []
lowercase = set({"""(""", """[""", """{"""} )
lowercase = set({""")""", """]""", """}"""} )
lowercase = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCAmelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCAmelCase__ ) == 0 or (len(lowerCAmelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCAmelCase__ ) == 0
def UpperCAmelCase__ ( ) -> str:
'''simple docstring'''
lowercase = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCAmelCase__ ):
print(lowerCAmelCase__ , """is balanced""" )
else:
print(lowerCAmelCase__ , """is not balanced""" )
if __name__ == "__main__":
main()
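

# Quick sanity check (names as defined above):
#     >>> is_balanced("([]{})")
#     True
#     >>> is_balanced("([)]")
#     False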
"""Calculate digits of pi with the Chudnovsky algorithm."""

from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Return the first `precision` digits of pi as a string."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
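
# Quick sanity check:
#     >>> pi(10)
#     '3.14159265'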
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper around token-id sequences: filtering, splitting and batching for distillation."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add the three fractions x_num/x_den + y_num/y_den + z_num/z_den and reduce the result."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
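

# e.g. 1/2 + 1/3 + 1/6 == 1:
#     >>> add_three(1, 2, 1, 3, 1, 6)
#     (1, 1)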


def solution(order: int = 35) -> int:
    """Search all x/y pairs with denominators up to `order`; return
    numerator + denominator of the total sum of unique fractions found."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
"""Find the thirteen adjacent digits in the 1000-digit number below that have
the greatest product (Project Euler style exercise)."""

from functools import reduce

N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
"""Maximum sum of k consecutive elements of an array (sliding window)."""

from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
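

# Quick sanity check (windows of length 4; the best is 3 + 1 + 0 + 20 = 24):
#     >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
#     24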
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase__ = [randint(-1000, 1000) for i in range(100)]
lowercase__ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 151 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :int = len(lowercase__ )
for i in range(n - 1 ):
for j in range(i + 1 , lowercase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _snake_case ( lowercase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if len(lowercase__ ) <= 1:
return arr, 0
lowerCAmelCase_ :Dict = len(lowercase__ ) // 2
lowerCAmelCase_ :List[str] = arr[0:mid]
lowerCAmelCase_ :Dict = arr[mid:]
lowerCAmelCase_ :str = count_inversions_recursive(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = count_inversions_recursive(lowercase__ )
lowerCAmelCase_ :Any = _count_cross_inversions(lowercase__ , lowercase__ )
lowerCAmelCase_ :str = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = []
lowerCAmelCase_ :str = 0
while i < len(lowercase__ ) and j < len(lowercase__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowercase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowercase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
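

# e.g. merging P = [2, 5] and Q = [1, 3]: the cross inversions are
# (2, 1), (5, 1) and (5, 3).
#     >>> _count_cross_inversions([2, 5], [1, 3])
#     ([1, 2, 3, 5], 3)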


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
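

# Hypothetical usage sketch (added; the variable names are illustrative and the
# transformers agents runtime plus Pillow must be installed):
#   tool = ImageSegmentationTool()
#   mask = tool(image=my_pil_image, label="cat")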
| 1 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 280 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector supporting the usual linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self):
        return Vector(self.__components)

    def component(self, i):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos, value):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index pos."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of n random integers drawn from [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A pure-Python matrix supporting addition, subtraction and multiplication."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return an n x n matrix of zeros."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix of random integers drawn from [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
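

if __name__ == "__main__":
    # Minimal demonstration added for illustration; not part of the original module.
    v = Vector([1, 2, 2])
    assert v.euclidean_length() == 3.0
    print(axpy(2, v, zero_vector(3)))  # 2 * v + 0 -> (2,4,4)
    print(square_zero_matrix(2))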
| 280 | 1 |
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word (letters and spaces only) into the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back into plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
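

# Round-trip illustration (added): decode(encode("hello")) == "hello".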
if __name__ == "__main__":
from doctest import testmod
testmod()
| 370 | import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 105 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
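

# Note (added): get_imports deliberately skips imports that sit inside a
# try/except ImportError block, which is why every case above parses to ["os"].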
| 173 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
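

# Illustration (added): running `accelerate test --config_file path/to/config.yaml`
# executes the bundled test_script.py through accelerate-launch with that config.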
if __name__ == "__main__":
main()
| 173 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
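
# Note (added): _LazyModule defers the heavy framework imports declared above
# (torch, TensorFlow, Flax) until an attribute of this package is first accessed.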
| 61 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_3[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        input_ids_expected = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        token_type_ids_expected = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        attention_mask_expected = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, input_ids_expected)
        self.assertListEqual(x_token.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token.attention_mask, attention_mask_expected)
        self.assertListEqual(x_token_2.input_ids, input_ids_expected)
        self.assertListEqual(x_token_2.token_type_ids, token_type_ids_expected)
        self.assertListEqual(x_token_2.attention_mask, attention_mask_expected)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 61 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""Unconditional image generation pipeline for the score-based SDE-VE model."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
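

# Hypothetical usage sketch (added; the checkpoint name is illustrative):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]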
| 169 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
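

# Usage illustration (added): MarkupLMConfig() reproduces the default
# microsoft/markuplm-base architecture; pass overrides as keyword arguments,
# e.g. MarkupLMConfig(max_depth=64).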
| 169 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor with resize, center-crop, rescale and normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 371 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 192 | 0 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library olid such as 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
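

# Illustration (added): get_openlibrary_data("isbn/0140328726") fetches the JSON
# record for that ISBN; an author olid such as "authors/OL34184A" works the same way.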
if __name__ == "__main__":
import doctest
doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break

        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue

        print(f"\nSearching Open Library for ISBN: {isbn}...\n")

        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 239 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 1 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCamelCase ( lowercase__ ):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
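# --- Editor's usage sketch (illustrative addition, not part of the original file) ---
# A minimal, hypothetical example of how a chunked audio feature extractor like the
# one above is typically called. The concrete class name in this file is assumed to
# be an EnCodec-style `SequenceFeatureExtractor` subclass; adjust the constructor
# name to whatever the class is actually called here.
#
#   import numpy as np
#
#   extractor = EncodecFeatureExtractor(chunk_length_s=1.0, overlap=0.01)  # hypothetical name
#   mono = np.random.randn(48_000).astype(np.float32)  # 2 s of fake mono audio at 24 kHz
#   features = extractor(mono, sampling_rate=24_000, return_tensors="np")
#   # features["input_values"] has shape (batch, channels, padded_length)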
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the spawned processes see the training script's own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
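# Editor's note (illustrative addition, not original): this launcher is invoked from
# the command line; the training script and its flags come after the launcher's own
# options. A hypothetical invocation on a TPU VM might look like:
#
#   python xla_spawn.py --num_cores 8 run_mlm.py --model_name_or_path bert-base-cased --output_dir /tmp/out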
| 241 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Returns the maximum sum of any `k` consecutive elements of `array`,
    computed with a sliding window in O(n).
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
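    # Editor's sketch (illustrative addition): cross-check the sliding window
    # against a brute-force scan on a small fixed input.
    sample = [1, 4, 2, 10, 23, 3, 1, 0, 20]
    window = 4
    brute_force = max(sum(sample[i : i + window]) for i in range(len(sample) - window + 1))
    assert max_sum_in_array(sample, window) == brute_force == 39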
| 105 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    """Read contents of BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    """Read contents of GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    """Read contents of LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    """Read contents of XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    """Read contents of ZSTD file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
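# Editor's usage sketch (illustrative addition, assuming the classes above are
# registered with fsspec the way Hugging Face `datasets` registers its compression
# filesystems; registration itself is not shown in this file):
#
#   import fsspec
#
#   # read a gzip-compressed text file as if it were a one-file filesystem
#   with fsspec.open("gzip://file.txt::/path/to/file.txt.gz", "rb") as f:
#       data = f.read()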
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    # Optional arguments for the launch helper
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # Shard filenames encode their sample count as "-<shard>-<count>.tfrecord"
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
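# Editor's note (illustrative addition, not original): a hypothetical invocation,
# assuming TFRecord shards produced by prepare_tfrecord_shards.py live in the
# given buckets. The script file name is assumed:
#
#   python run_mlm.py --output_dir gs://my-bucket/mlm-out \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --tokenizer unigram-tokenizer-wikitext --bfloat16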
| 61 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    # Target key names follow diffusers' AutoencoderKL naming; they were erased in
    # this copy and are restored here from the upstream conversion script.
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE to.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
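# Editor's note (illustrative addition, not original): a typical invocation of this
# converter. The script file name is assumed; use whatever this file is saved as:
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers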
| 61 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
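if __name__ == "__main__":
    # Editor's sketch (illustrative addition): the save/load round trip the tests
    # above exercise, runnable outside of unittest. Writes only to a temp dir.
    cfg = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as d:
        cfg.save_pretrained(d)
        assert GenerationConfig.from_pretrained(d).temperature == 0.7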
| 141 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False)
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.")
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.")
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.")
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
if __name__ == "__main__":
main()
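# Editor's note (illustrative addition, not original): this example is normally
# launched with Accelerate's CLI; the file name `checkpointing.py` is assumed:
#
#   accelerate launch checkpointing.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0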
| 141 | 1 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Five-high straights must compare correctly against regular straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must return the same result and not mutate the card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler, tested against the poker_hands.txt file.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
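if __name__ == "__main__":
    # Editor's sketch (illustrative addition): a quick head-to-head outside pytest.
    # Because of the relative import above, run this as a module, e.g.
    # `python -m <package>.test_poker_hand`.
    print(PokerHand("JH AH TH KH QH").compare_with(PokerHand("2S 3H 4H 5S 6C")))  # -> Win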
| 105 |
def solution(n: int = 10) -> str:
    """
    Returns the last `n` digits of NUMBER = 28433 * 2**7830457 + 1.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
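    # Editor's sketch (illustrative addition): three-argument pow does modular
    # exponentiation, so the roughly 2.36-million-digit value is never materialized.
    assert solution(1) == str((28433 * pow(2, 7830457, 10) + 1) % 10)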
| 192 | 0 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
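    # Editor's sketch (illustrative addition): a fixed, non-interactive example.
    # The conversion reverses the input, computes postfix, then reverses the result.
    assert infix_2_prefix("a+b*c^d") == "+a*b^cd"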
| 121 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
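    # Editor's sketch (illustrative addition): force between two 4 cm^2 plates
    # separated by 1 micrometre, using the constants defined above.
    print(casimir_force(force=0, area=0.0004, distance=1e-6))  # -> {'force': ~5.2e-07}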
| 121 | 1 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def snake_case_ ( lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = tmp_path / """file.csv"""
_UpperCAmelCase : Dict = textwrap.dedent(
"""\
header1,header2
1,2
10,20
""" )
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = tmp_path / """malformed_file.csv"""
_UpperCAmelCase : int = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
""" )
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = tmp_path / """csv_with_image.csv"""
_UpperCAmelCase : List[Any] = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def snake_case_ ( lowerCAmelCase_ )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = tmp_path / """csv_with_label.csv"""
_UpperCAmelCase : List[str] = textwrap.dedent(
"""\
label
good
bad
good
""" )
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def snake_case_ ( lowerCAmelCase_ )-> int:
'''simple docstring'''
_UpperCAmelCase : str = tmp_path / """csv_with_int_list.csv"""
_UpperCAmelCase : int = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
""" )
with open(SCREAMING_SNAKE_CASE__ , """w""" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Csv()
_UpperCAmelCase : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(SCREAMING_SNAKE_CASE__ , match="""Error tokenizing data""" ):
for _ in generator:
pass
assert any(
record.levelname == """ERROR"""
and """Failed to read file""" in record.message
and os.path.basename(SCREAMING_SNAKE_CASE__ ) in record.message
for record in caplog.records )
@require_pil
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as f:
_UpperCAmelCase : int = f.read().splitlines()[1]
_UpperCAmelCase : Optional[int] = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) )
_UpperCAmelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_UpperCAmelCase : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""image""" ).type == Image()()
_UpperCAmelCase : Dict = pa_table.to_pydict()["""image"""]
assert generated_content == [{"path": image_file, "bytes": None}]
def snake_case_ ( lowerCAmelCase_ )-> Dict:
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as f:
_UpperCAmelCase : List[str] = f.read().splitlines()[1:]
_UpperCAmelCase : str = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) )
_UpperCAmelCase : Any = csv._generate_tables([[csv_file_with_label]] )
_UpperCAmelCase : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )()
_UpperCAmelCase : Any = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).str2int(label) for label in labels]
def snake_case_ ( lowerCAmelCase_ )-> Optional[Any]:
'''simple docstring'''
    _UpperCAmelCase : List[str] = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda x: [int(i) for i in x.split()]} )
_UpperCAmelCase : str = csv._generate_tables([[csv_file_with_int_list]] )
_UpperCAmelCase : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type )
_UpperCAmelCase : Optional[Any] = pa_table.to_pydict()["""int_list"""]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 215 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': 5_12,
}
lowerCAmelCase_ = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = LxmertTokenizer
def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars
):
snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) )
snake_case_ = do_lower_case
snake_case_ = strip_accents
snake_case_ = tokenize_chinese_chars
snake_case_ = normalizer_class(**_UpperCamelCase )
snake_case_ = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
 | 8 | 0 |
def check_cycle( graph: dict ) -> bool:
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk)
        for node in graph )
def depth_first_search( graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
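# Example (a sketch): the edge 3 -> 1 closes the cycle 1 -> 2 -> 3 -> 1.
# check_cycle({0: [1], 1: [2], 2: [3], 3: [1]})  # -> True
# check_cycle({0: [1], 1: [2], 2: []})           # -> False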
if __name__ == "__main__":
from doctest import testmod
testmod()
| 351 |
def print_max_activities( start: list[int] , finish: list[int] ) -> None:
    '''simple docstring'''
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = [1, 3, 0, 5, 8, 5]
__magic_name__ = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
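# For the sample data above the printed selection is "0,1,3,4,": activities
# 0, 1, 3 and 4 form a maximal set of mutually compatible activities.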
| 152 | 0 |
def count_inversions_bf( arr ):
    '''simple docstring'''
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive( arr ):
    '''simple docstring'''
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions( p , q ):
    '''simple docstring'''
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
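# Worked example (a sketch): merging p = [5, 11] with q = [2, 10]:
# 5 > 2 counts len(p) - 0 = 2 cross inversions (both 5 and 11 exceed 2),
# and later 11 > 10 adds one more, so r = [2, 5, 10, 11] and num_inversion = 3.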
def main():
    '''simple docstring'''
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
| 107 |
import math
from datetime import datetime, timedelta
def gauss_easter( year: int ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
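# Sanity check (a sketch; Western Easter 2023 fell on April 9):
# assert gauss_easter(2023) == datetime(2023, 4, 9)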
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
__UpperCamelCase : Tuple = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 228 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = CLIPTokenizer
__snake_case = CLIPTokenizerFast
__snake_case = True
__snake_case = {}
__snake_case = False
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
super().setUp()
# fmt: off
A__ : List[Any] =["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A__ : Union[str, Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : str =["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
A__ : int ={"""unk_token""": """<unk>"""}
A__ : Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowercase__ ( self : int , **lowerCAmelCase_ : List[Any] ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[str]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
A__ : Any ="""lower newer"""
A__ : List[Any] ="""lower newer"""
return input_text, output_text
def lowercase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__ : Dict =CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ : List[Any] ="""lower newer"""
A__ : Optional[Any] =["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
A__ : Optional[int] =tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Optional[Any] =tokens + [tokenizer.unk_token]
A__ : Union[str, Any] =[10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
@require_ftfy
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Optional[int] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : str ="""A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
A__ : Optional[int] =tokenizer_s.tokenize(lowerCAmelCase_ )
A__ : str =tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
A__ : Optional[Any] ="""xa\u0303y""" + """ """ + """x\xe3y"""
A__ : List[Any] =tokenizer_s.tokenize(lowerCAmelCase_ )
A__ : List[Any] =tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of space type
A__ : int =[
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
A__ : Tuple =tokenizer_s.tokenize(lowerCAmelCase_ )
A__ : str =tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of line break type
A__ : int =[
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
A__ : List[Any] =tokenizer_s.tokenize(lowerCAmelCase_ )
A__ : Tuple =tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Dict ="""hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A__ : Tuple =f"{text_of_1_token} {text_of_1_token}"
A__ : Optional[Any] =self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A__ : Optional[int] =tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
A__ : Tuple =f" {text}"
A__ : str =self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A__ : List[Any] =tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ) + 1, 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
def lowercase__ ( self : str ) -> Any:
'''simple docstring'''
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
# CLIP always lower cases letters
pass
| 136 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : List[str] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if tokenize_kwargs is None:
A__ : List[Any] ={}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
A__ : str =truncation
A__ : Optional[int] =tokenize_kwargs
A__ : List[Any] ={}
if return_tensors is not None:
A__ : Any =return_tensors
return preprocess_params, {}, postprocess_params
def lowercase__ ( self : int , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Dict ) -> Dict[str, GenericTensor]:
'''simple docstring'''
A__ : List[str] =self.framework
A__ : Union[str, Any] =self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
return model_inputs
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : Union[str, Any] =self.model(**lowerCAmelCase_ )
return model_outputs
def lowercase__ ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False ) -> List[Any]:
'''simple docstring'''
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : int , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
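# Usage sketch (assumes a checkpoint is reachable; the result is a nested
# list of floats shaped [batch, sequence_length, hidden_size]):
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# extractor("This is a test")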
| 136 | 1 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
UpperCAmelCase = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
UpperCAmelCase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def snake_case ( self : Tuple , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : Any=None ):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__lowercase , __lowercase , sample_weight=__lowercase ) ),
}
| 141 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float], x: float ) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly) )
def horner( poly: Sequence[float], x: float ) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
UpperCAmelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
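# Both calls print the same value:
# 0 + 0*10 + 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0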
| 141 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any]=13 , lowerCAmelCase__ : List[Any]=7 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Any=99 , lowerCAmelCase__ : Dict=32 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Optional[Any]=37 , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Union[str, Any]=512 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Optional[int]="None" , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : Optional[int]=None , ):
SCREAMING_SNAKE_CASE_: Optional[int] = parent
SCREAMING_SNAKE_CASE_: List[Any] = batch_size
SCREAMING_SNAKE_CASE_: List[Any] = seq_length
SCREAMING_SNAKE_CASE_: List[Any] = is_training
SCREAMING_SNAKE_CASE_: List[str] = use_input_mask
SCREAMING_SNAKE_CASE_: int = use_token_type_ids
SCREAMING_SNAKE_CASE_: List[Any] = use_labels
SCREAMING_SNAKE_CASE_: int = vocab_size
SCREAMING_SNAKE_CASE_: Any = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Dict = num_attention_heads
SCREAMING_SNAKE_CASE_: Dict = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int = max_position_embeddings
SCREAMING_SNAKE_CASE_: Tuple = type_vocab_size
SCREAMING_SNAKE_CASE_: Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: List[Any] = num_labels
SCREAMING_SNAKE_CASE_: str = num_choices
SCREAMING_SNAKE_CASE_: str = relative_attention
SCREAMING_SNAKE_CASE_: int = position_biased_input
SCREAMING_SNAKE_CASE_: Optional[int] = pos_att_type
SCREAMING_SNAKE_CASE_: List[Any] = scope
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_: Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_: Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = None
SCREAMING_SNAKE_CASE_: str = None
SCREAMING_SNAKE_CASE_: Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_: str = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: List[str] = TFDebertaVaModel(config=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_: Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = TFDebertaVaForMaskedLM(config=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE_: List[Any] = TFDebertaVaForSequenceClassification(config=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: str = self.num_labels
SCREAMING_SNAKE_CASE_: str = TFDebertaVaForTokenClassification(config=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCAmelCase : Any = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Tuple = TFDebertaVaModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
self.assertIsNotNone(lowerCAmelCase__)
@require_tf
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet")
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
SCREAMING_SNAKE_CASE_: Dict = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
SCREAMING_SNAKE_CASE_: List[Any] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)[0]
SCREAMING_SNAKE_CASE_: Optional[Any] = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1E-4)
| 369 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
lowerCAmelCase : Tuple = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = ConvBertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any="[UNK]" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : Dict="[CLS]" , lowerCAmelCase__ : Dict="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Dict , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , lowerCAmelCase__) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , normalizer_state.pop("type"))
SCREAMING_SNAKE_CASE_: Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_: List[str] = strip_accents
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_: Optional[int] = normalizer_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
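# Worked example for the token type ids above: a pair of sequences of
# lengths 3 and 2 yields [0]*5 + [1]*3, i.e. [0, 0, 0, 0, 0, 1, 1, 1],
# covering [CLS] A [SEP] B [SEP].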
| 127 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase__ : Any = logging.get_logger(__name__)
def lowerCamelCase__ ( a , a , a , a ) -> Tuple[int, int]:
def constraint_to_multiple_of(a , a , a=0 , a=None ):
_A: List[Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_A: int = math.floor(val / multiple ) * multiple
if x < min_val:
_A: Tuple = math.ceil(val / multiple ) * multiple
return x
_A: str = (output_size, output_size) if isinstance(a , a ) else output_size
_A , _A: List[str] = get_image_size(a )
_A , _A: List[str] = output_size
# determine new height and width
_A: List[Any] = output_height / input_height
_A: Any = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_A: str = scale_width
else:
# fit height
_A: List[str] = scale_height
_A: Union[str, Any] = constraint_to_multiple_of(scale_height * input_height , multiple=a )
_A: Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=a )
return (new_height, new_width)
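# Worked example (a sketch): a 480x640 image targeting 384x384 with
# keep_aspect_ratio=True and multiple=32: scale_height = 0.8 deviates less
# from 1 than scale_width = 0.6, so both axes use 0.8 and the result is
# (new_height, new_width) = (384, 512), both multiples of 32.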
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Tuple = ['''pixel_values''']
def __init__( self : List[str] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: Tuple = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
_A: Dict = get_size_dict(lowerCAmelCase_ )
_A: Dict = do_resize
_A: Dict = size
_A: Union[str, Any] = keep_aspect_ratio
_A: int = ensure_multiple_of
_A: Union[str, Any] = resample
_A: Dict = do_rescale
_A: Optional[int] = rescale_factor
_A: List[str] = do_normalize
_A: List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A: str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__ ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ):
"""simple docstring"""
_A: Dict = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_A: List[str] = get_resize_output_image_size(
lowerCAmelCase_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=lowerCAmelCase_ , multiple=lowerCAmelCase_ , )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ):
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ):
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : int , ):
"""simple docstring"""
_A: Optional[int] = do_resize if do_resize is not None else self.do_resize
_A: Dict = size if size is not None else self.size
_A: Optional[int] = get_size_dict(lowerCAmelCase_ )
_A: List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_A: Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_A: Optional[Any] = resample if resample is not None else self.resample
_A: Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_A: Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
_A: Dict = do_normalize if do_normalize is not None else self.do_normalize
_A: List[str] = image_mean if image_mean is not None else self.image_mean
_A: Optional[int] = image_std if image_std is not None else self.image_std
_A: Any = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_A: List[str] = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_A: List[Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_rescale:
_A: Any = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_A: Dict = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_A: List[str] = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_A: List[Any] = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __magic_name__ ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Tuple] = None ):
"""simple docstring"""
_A: Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_A: Union[str, Any] = target_sizes.numpy()
_A: List[Any] = []
for idx in range(len(lowerCAmelCase_ ) ):
_A: str = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_A: List[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_A: List[Any] = logits.argmax(dim=1 )
_A: List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
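# Usage sketch for the post-processing above (canonically this method is
# `post_process_semantic_segmentation` on the DPT image processor; names are
# illustrative): given model `outputs` and one (height, width) per image,
# maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
# maps[0] is then a (480, 640) tensor of per-pixel class indices.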
| 121 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
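# Usage sketch (illustrative output directory and metric):
# callbacks = [
#     get_checkpoint_callback("outputs/", "rouge2"),
#     get_early_stopping_callback("rouge2", patience=3),
# ]
# trainer = pl.Trainer(callbacks=callbacks)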
class UpperCAmelCase ( pl.Callback ):
'''simple docstring'''
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Union[str, Any] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : Any=True ):
"""simple docstring"""
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_A: Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
_A: Tuple = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A: List[str] = od / '''test_results.txt'''
_A: Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A: Any = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_A: Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''a+''' ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
_A: Optional[int] = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
_A: List[str] = val.item()
_A: List[Any] = F"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
_A: Optional[int] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowerCAmelCase_ )
@rank_zero_only
def __magic_name__ ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
try:
_A: Union[str, Any] = pl_module.model.model.num_parameters()
except AttributeError:
_A: Any = pl_module.model.num_parameters()
_A: Optional[int] = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , '''test''' )
@rank_zero_only
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 121 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :int = "luke"
    def __init__( self , vocab_size=50267 , entity_vocab_size=500000 , hidden_size=768 , entity_emb_size=256 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_entity_aware_attention=True , classifier_dropout=None , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 353 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : List[Any] , UpperCamelCase : Dict=-1 ) -> List[Any]:
"""simple docstring"""
# in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : int = mode.value
lowerCAmelCase__ : Dict = os.path.join(UpperCamelCase , f"""{mode}.txt""" )
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : List[Any] = []
with open(UpperCamelCase , encoding="""utf-8""" ) as f:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
guid_index += 1
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Tuple = []
else:
lowerCAmelCase__ : Optional[int] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCamelCase , labels=UpperCamelCase ) )
return examples
def _lowerCAmelCase ( self : Any , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ : Union[str, Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCamelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowerCAmelCase ( self : str , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : List[str] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCamelCase ( a_ ):
def __init__( self : Union[str, Any] ) -> Any:
"""simple docstring"""
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(UpperCamelCase , """r""" ) as f:
lowerCAmelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ : str = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """simple docstring"""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """simple docstring"""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        """simple docstring"""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
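

# Minimal usage sketch (editor's addition, not part of the original script).
# It assumes a hypothetical `data/` directory containing a CoNLL-formatted
# `train.txt`; `NER` comes from this module and `Split` from utils_ner.
if __name__ == "__main__":
    ner_task = NER()
    train_examples = ner_task.read_examples_from_file("data", Split.train)
    print(f"loaded {len(train_examples)} examples")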
| 212 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a list of random floats with the given 2-D shape"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1E-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1E-3))
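

# Minimal usage sketch (editor's addition): log-mel features for one second of
# random audio, assuming the default 16 kHz Whisper configuration rather than
# the toy settings used by the tester above.
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of noise
    features = fe(waveform, sampling_rate=16_000, return_tensors="np").input_features
    print(features.shape)  # (1, 80, 3000): 80 mel bins over 30 s of padded frames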
| 263 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 152 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_snake_case = logging.get_logger(__name__)
_snake_case = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
" padding..")
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        '''simple docstring'''
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs, )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
        return scheduler
    def _get_train_sampler(self):
'''simple docstring'''
if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
    def _compute_loss(self, model, inputs, labels):
        '''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        '''simple docstring'''
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None, ):
        '''simple docstring'''
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        '''simple docstring'''
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
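

# Illustrative sketch (editor's addition) of what the dynamically imported
# `label_smoothed_nll_loss` computes: blend the NLL of the gold token with the
# mean NLL over the vocabulary. A simplified stand-in under stated
# assumptions, not the exact implementation shipped in `utils`.
def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) token ids
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.mean(dim=-1)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    loss = (1.0 - epsilon) * nll_loss.sum() + epsilon * smooth_loss.sum()
    return loss, nll_loss.sum()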
| 354 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1, ):
        '''simple docstring'''
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None):
        '''simple docstring'''
        return sample
    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        '''simple docstring'''
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        '''simple docstring'''
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        '''simple docstring'''
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True, ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ):
        '''simple docstring'''
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
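

# Minimal predictor-corrector sampling sketch (editor's addition). `model` is
# a hypothetical score network; a real pipeline (e.g. ScoreSdeVePipeline)
# wires this scheduler up with a trained UNet, so the loop stays commented out.
#
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(num_inference_steps=1000)
# scheduler.set_sigmas(num_inference_steps=1000)
# sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     for _ in range(scheduler.config.correct_steps):
#         score = model(sample, t).sample  # hypothetical call
#         sample = scheduler.step_correct(score, sample).prev_sample
#     score = model(sample, t).sample  # hypothetical call
#     sample = scheduler.step_pred(score, t, sample).prev_sample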
| 300 | 0 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    '''simple docstring'''
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    '''simple docstring'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        """simple docstring"""
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        self.thread_ts = None  # set by post(); required before post_reply() may run

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        """simple docstring"""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        """simple docstring"""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
        """simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def failures(self) -> Dict:
        """simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
    @property
    def category_failures(self) -> Dict:
        """simple docstring"""
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
    @property
    def payload(self) -> str:
        """simple docstring"""
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out() -> None:
        """simple docstring"""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
print("""Sending the following payload""")
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase_)}))
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase_ , )
    def post(self) -> None:
        """simple docstring"""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        """simple docstring"""
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self) -> None:
        """simple docstring"""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )

                time.sleep(1)
def get_job_links():
    '''simple docstring'''
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    '''simple docstring'''
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    '''simple docstring'''

    class Artifact:
        def __init__(self, name: str):
            """simple docstring"""
            self.name = name
            self.paths = []

        def __str__(self):
            """simple docstring"""
            return self.name

        def add_path(self, path: str):
            """simple docstring"""
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = get_job_links()
UpperCAmelCase : Union[str, Any] = retrieve_available_artifacts()
UpperCAmelCase : Any = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase : List[Any] = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase : Any = github_actions_job_links.get("run_doctests")
UpperCAmelCase : Tuple = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
UpperCAmelCase : Optional[Any] = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = handle_test_results(artifact["stats"])
UpperCAmelCase : str = failed
UpperCAmelCase : List[str] = success
UpperCAmelCase : List[str] = time_spent[1:-1] + ", "
UpperCAmelCase : Dict = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
UpperCAmelCase : Optional[int] = line.replace("FAILED ", "")
UpperCAmelCase : str = line.split()[0].replace("\n", "")
if "::" in line:
UpperCAmelCase , UpperCAmelCase : int = line.split("::")
else:
UpperCAmelCase , UpperCAmelCase : List[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase : Tuple = all_failures[test] if test in all_failures else "N/A"
UpperCAmelCase : List[Any] = failure
break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
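

# Quick sanity check (editor's addition) for the parsing helper above, on a
# made-up pytest summary string of the shape this script consumes:
#
# >>> handle_test_results("== 2 failed, 30 passed in 6.16s ==")
# (2, 30, '6.16s')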
| 136 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
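

# Worked examples (editor's addition): the cross product of AB and AC vanishes
# exactly when the three points lie on one line.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3)))  # False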
| 136 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).

                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
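

# Illustrative walk-through (editor's addition) of the toy BPE above: the
# merges "l o" -> "lo" and "e r</w>" -> "er</w>" fire first, every other
# character stays a single symbol, and the vocabulary indices give
#
#     "lower newer" -> ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
#                   -> [10, 2, 16, 9, 3, 2, 16]  (plus 20 for a trailing <unk>)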
| 352 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports the following format
            # - {"image": image, "question": question}
            # - [{"image": image, "question": question}]
            # - Generator and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
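

# Minimal usage sketch (editor's addition). The checkpoint and image path are
# illustrative stand-ins, not something this file pins down.
#
# from transformers import pipeline
#
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# preds = vqa(image="cats.png", question="How many cats are there?", top_k=2)
# # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]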
| 29 | 0 |
class Node:
    def __init__(self, name, val):
        '''simple docstring'''
        self.name = name
        self.val = val

    def __str__(self):
        '''simple docstring'''
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        '''simple docstring'''
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        '''simple docstring'''
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        '''simple docstring'''
        return self.get_value(key)

    def get_parent_idx(self, idx):
        '''simple docstring'''
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        '''simple docstring'''
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        '''simple docstring'''
        return idx * 2 + 2

    def get_value(self, key):
        '''simple docstring'''
        return self.heap_dict[key]
    def build_heap(self, array):
        '''simple docstring'''
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    def sift_down(self, idx, array):
        '''simple docstring'''
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        '''simple docstring'''
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        '''simple docstring'''
        return self.heap[0]

    def remove(self):
        '''simple docstring'''
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x
    def insert(self, node):
        '''simple docstring'''
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        '''simple docstring'''
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        '''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 |
import random
def partition(a, left_index, right_index):
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main():
    """simple docstring"""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
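

# Non-interactive example (editor's addition): quick_sort_random sorts in
# place over the half-open range [left, right) with a randomly chosen pivot.
#
# demo = [44, 22, 55, 11, 33]
# quick_sort_random(demo, 0, len(demo))
# assert demo == [11, 22, 33, 44, 55]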
| 127 | 0 |
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
| 367 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                dest_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[dest_key] = expert_weights[idx]
                print(f"{key} -> {dest_key}")
            s_dict.pop(key)

    return s_dict
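# Illustrative example of one renaming performed above (key chosen for clarity):
#   "encoder/layers_0/attention/key/kernel"
#       -> "encoder/block/0/layer/0/SelfAttention/k/kernel"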
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
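# e.g. a gin line "NUM_HEADS = 12" is mapped through GIN_TO_CONFIG_MAPPING to
# the SwitchTransformersConfig kwarg num_heads=12.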
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
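# Typical invocation of this script (paths are placeholders):
#   python <this_script>.py --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model.gin --pytorch_dump_folder_path ./switch_dump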
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 280 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
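# e.g. ids_tensor((2, 5), vocab_size=100) returns an int32 array of shape (2, 5)
# with entries drawn uniformly from [0, 99].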
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 293 |
import argparse
import hashlib  # hashlib is only used inside the test function
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hash function."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the input to a multiple of 64 bytes, appending the 8-byte bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
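        # Pinning model_name and revision above keeps the expected encoding stable
        # even if the tokenizer files on the Hub are updated later.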
| 169 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
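# With this pattern, `from transformers.models.nezha import NezhaModel` resolves
# lazily: the torch-backed submodule is only imported on first attribute access.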
| 169 | 1 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
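# Examples: "A" -> 1, "Z" -> 26, "AB" -> 28, "ZZ" -> 702.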
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 290 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting the slice size to `None` disables attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
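# Minimal usage sketch (illustrative; assumes a checkpoint whose components match
# the constructor signature above — the checkpoint name is a placeholder):
#   pipe = SeedResizeStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("a photo of an astronaut", height=640, width=640).images[0]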
| 300 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
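# Sketch of how this ONNX config is used (names here are assumptions, not part of
# this module): PerceiverOnnxConfig(PerceiverConfig()).generate_dummy_inputs(tokenizer)
# returns a dict with "inputs" and "attention_mask" ready for export tracing.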
| 369 | """simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
__SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
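# The namedtuple also allows access by field name, e.g. covid_stats().cases.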
| 321 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_UpperCamelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_decoder()
_UpperCAmelCase : List[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Tuple = floats_list((3, 1_0_0_0) )
_UpperCAmelCase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="np" )
_UpperCAmelCase : Any = processor(_UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = self.get_decoder()
_UpperCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : List[Any] = 'This is a test string'
_UpperCAmelCase : str = processor(text=_UpperCamelCase )
_UpperCAmelCase : List[str] = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
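    # Seeding above makes the pseudo-random logits reproducible across test runs.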
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = self.get_decoder()
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : int = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
_UpperCAmelCase : Union[str, Any] = processor.decode(_UpperCamelCase )
_UpperCAmelCase : List[Any] = decoder.decode_beams(_UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.get_feature_extractor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Tuple = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(_UpperCamelCase )
else:
with get_context(_UpperCamelCase ).Pool() as pool:
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : List[Any] = list(_UpperCamelCase )
with get_context("fork" ).Pool() as p:
_UpperCAmelCase : Dict = decoder.decode_beams_batch(_UpperCamelCase , _UpperCamelCase )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_UpperCamelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_UpperCamelCase , decoded_processor.logit_score )
self.assertListEqual(_UpperCamelCase , decoded_processor.lm_score )
def _lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Any = self.get_decoder()
_UpperCAmelCase : int = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Any = self._get_dummy_logits()
_UpperCAmelCase : List[str] = 1_5
_UpperCAmelCase : str = -20.0
_UpperCAmelCase : List[Any] = -4.0
_UpperCAmelCase : Dict = processor.batch_decode(
_UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
_UpperCAmelCase : List[str] = decoded_processor_out.text
_UpperCAmelCase : Dict = list(_UpperCamelCase )
with get_context("fork" ).Pool() as pool:
_UpperCAmelCase : Optional[int] = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
_UpperCAmelCase : Dict = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _UpperCamelCase )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCamelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _UpperCamelCase , atol=1e-3 ) )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
_UpperCAmelCase : Optional[Any] = self._get_dummy_logits()
_UpperCAmelCase : List[Any] = 2.0
_UpperCAmelCase : List[str] = 5.0
_UpperCAmelCase : Optional[Any] = -20.0
_UpperCAmelCase : Union[str, Any] = True
        decoded_processor_out = processor.batch_decode(
_UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
_UpperCAmelCase : Dict = decoded_processor_out.text
_UpperCAmelCase : Any = list(_UpperCamelCase )
decoder.reset_params(
alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
with get_context("fork" ).Pool() as pool:
_UpperCAmelCase : Tuple = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , )
_UpperCAmelCase : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _UpperCamelCase )
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCamelCase )
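        # Shallow-fusion semantics of the parameters checked above: alpha scales the language-model
        # contribution, beta rewards word insertions, unk_score_offset penalises out-of-vocabulary
        # tokens, and lm_score_boundary toggles kenlm's sentence-boundary scoring.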
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        downloaded_decoder_path = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(downloaded_decoder_path )
        expected_decoder_files = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm" )
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        downloaded_decoder_path = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir )
        expected_decoder_files = os.listdir(downloaded_decoder_path )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
        raw_speech = floats_list((3, 1_0_0_0) )
        input_wavaveca = processor_wavaveca(raw_speech , return_tensors="np" )
        input_auto = processor_auto(raw_speech , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits )
        decoded_auto = processor_auto.batch_decode(logits )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_UpperCAmelCase : str = self.get_feature_extractor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_decoder()
_UpperCAmelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def _lowerCAmelCase ( offsets : List[Any] , key : int ) -> str:
        """simple docstring"""
        retrieved_list = [d[key] for d in offsets]
return retrieved_list
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(_UpperCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
import torch
        ds = load_dataset("common_voice" , "en" , split="train" , streaming=True )
        ds = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
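        # The offsets are counted in model output frames; inputs_to_logits_ratio divided by the
        # sampling rate converts a frame index into seconds of audio, which is what the
        # start_time/end_time fields below report.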
        word_offsets = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
        expected_output = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , expected_output )
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets , "start_time" ) )
        end_times = torch.tensor(self.get_from_offsets(word_offsets , "end_time" ) )
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_tensor , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_tensor , atol=0.01 ) ) | 145 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
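# Example (hypothetical invocation): running `RUN_SLOW=yes python -m pytest tests/` makes
# _run_slow_tests True, which un-skips every test wrapped by the slow-test decorator below;
# any value strtobool rejects raises the ValueError above.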
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(lowercase__ , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"test requires torch version >= {version}" )(test_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    clear_on_setup : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def __UpperCAmelCase ( self , mocks ) -> Any:
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( tensor ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
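# A minimal usage sketch (assuming the script runs under `accelerate launch` so several processes
# are alive): the helper gathers one tensor per rank and returns True only when every rank holds
# an identical copy, e.g. checking that a seeded initialisation matched across processes.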
class lowerCamelCase :
'''simple docstring'''
    def __init__( self , returncode , stdout , stderr ) -> Any:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
# XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def lowercase__ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class SubprocessCallException(Exception ):
'''simple docstring'''
pass
def lowercase__ ( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
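# Usage sketch (hypothetical command): the async runner above streams a child process's
# stdout/stderr live while also capturing both into the returned _RunOutput, and raises a
# RuntimeError with the combined worker stderr when the child exits non-zero; the synchronous
# helper wraps subprocess.check_output and re-raises failures as SubprocessCallException.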
| 29 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A : Any = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
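        # The assertion above is a KV-cache consistency check: decoding incrementally with
        # past_key_values must reproduce (to ~1e-3) the logits of a single full-sequence decode.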
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ) -> List[Any]:
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
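# The masks above are derived rather than supplied: positions equal to pad_token_id get 0 in the
# encoder attention_mask, and the decoder mask always keeps position 0 (the forced start token)
# visible while masking padded continuations.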
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def lowercase__ ( self : Optional[int] ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def lowercase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def lowercase__ ( self : Optional[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def lowercase__ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : Tuple ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/pegasus-large' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
def lowercase__ ( self : Optional[Any] ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
        tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text , return_tensors='np' , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 323 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs )
        result = model(inputs_list )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowercase__ ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def lowercase__ ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def lowercase__ ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def lowercase__ ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def lowercase__ ( self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def lowercase__ ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def lowercase__ ( self : Dict ):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
# TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_positions = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
        tf.debugging.assert_near(emba_positions , desired_weights , atol=self.tolerance )
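        # The expected table follows the standard sinusoidal scheme for this embedding layer:
        # with embedding_dim=6 the first three channels are sin(p / 10000^(2i/6)) and the last
        # three are the matching cosines, so position 0 gives [0, 0, 0, 1, 1, 1] and position 1
        # gives [sin(1), sin(0.0464), sin(0.0022), cos(1), cos(0.0464), cos(0.0022)].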
def lowercase__ ( self : int ):
        desired_weights = tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
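        # Rotary position embeddings rotate each (even, odd) feature pair by a position-dependent
        # angle: q' = q * cos(theta_p) + rotate_half(q) * sin(theta_p), and likewise for k, which
        # is the transformation the expected tensors above encode for the first six positions.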
| 323 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
a__ : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
snake_case__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : Optional[str] = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
snake_case__ : Optional[str] = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ : bool = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case__ : Optional[int] = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
    def __post_init__( self : Optional[Any] ) -> Optional[Any]:
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
snake_case__ : PreTrainedTokenizerBase
snake_case__ : Union[bool, str, PaddingStrategy] = True
snake_case__ : Optional[int] = None
snake_case__ : Optional[int] = None
    def __call__( self : int , features : Any ) -> str:
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
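# Shape sketch for the collator above (hypothetical sizes): a batch of 8 examples with 4 choices
# arrives as 8 feature dicts of 4 sequences each, is flattened to 32 sequences so tokenizer.pad
# sees a flat list, then viewed back to (8, 4, seq_len) so the multiple-choice head scores one
# row of 4 candidates per example.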
def UpperCAmelCase__ ():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4 )]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
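    # A worked sketch of the expansion above (hypothetical toy values, not part
    # of the original script): one SWAG example with 4 endings becomes 4
    # (context, ending) sentence pairs before tokenization, and the tokenized
    # columns are regrouped into lists of 4 afterwards, e.g.:
    #
    #     first_sentences  -> ["She opens the box"] * 4          (flattened contexts)
    #     second_sentences -> one "header + ending" per choice   (flattened endings)
    #     "input_ids" of length 4*N -> N rows of 4 choice encodings each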
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    '''For xla_spawn (TPUs): each spawned process simply runs main().'''
    main()
if __name__ == "__main__":
main()
| 54 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A stub standing in for PIL.Image when vision deps are unavailable."""

        @staticmethod
        def open(*args , **kwargs ):
            pass
def hashimage(image ) -> str:
    # Short, stable fingerprint of an image's raw bytes, used to compare masks.
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable(mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline(self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 0 |
'''simple docstring'''
def create_ngram(sentence: str , ngram_size: int ) -> list[str]:
    '''
    Return every contiguous substring of ``sentence`` of length ``ngram_size``.
    '''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
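    # Usage sketch (illustrative values, not part of the original module):
    # character-level trigrams of "hello" are every contiguous 3-char window.
    assert create_ngram("hello", 3) == ["hel", "ell", "llo"]
    assert create_ngram("hello", 6) == []  # window longer than the string yields nothing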
| 106 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    """
    Handles arguments for zero-shot text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
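# A worked sketch of the pairing above (hypothetical values): one sequence and
# two labels expand into two premise/hypothesis pairs for the NLI model.
#
#     handler = ZeroShotClassificationArgumentHandler()
#     handler("I love this film", ["positive", "negative"], "This example is {}.")
#     -> ([["I love this film", "This example is positive."],
#          ["I love this film", "This example is negative."]],
#         ["I love this film"])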
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
    def entailment_id(self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail" ):
                return ind
        return -1
    def _parse_and_tokenize(self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self , **kwargs ):
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self , sequences , *args , **kwargs ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"""Unable to understand extra arguments {args}""" )
        return super().__call__(sequences , **kwargs )
    def preprocess(self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward(self , inputs ):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
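# End-to-end usage sketch (hypothetical inputs; "facebook/bart-large-mnli" is a
# common NLI checkpoint for this task, not one referenced by this file):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])
#     -> {"sequence": "...", "labels": ["politics", "sports"], "scores": [0.97..., 0.02...]}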
| 106 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(F'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(F'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(F'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , has_lm_head=False , is_semantic=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[F'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[F'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=has_lm_head , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
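# Example invocation (the script name and output path are illustrative; the URL
# matches the parser's default above):
#
#     python convert_dit_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#         --pytorch_dump_folder_path ./dit-base \
#         --push_to_hub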
| 169 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline(ChunkPipeline ):
def __init__( self :Optional[int] , **lowerCamelCase :Dict ) -> int:
super().__init__(**lowerCamelCase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
def UpperCAmelCase_ ( self :Any , **lowerCamelCase :int ) -> int:
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase__ = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCAmelCase__ = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCAmelCase__ = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase__ = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase__ = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase__ = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCAmelCase__ = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCAmelCase__ = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCAmelCase__ = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCAmelCase__ = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCAmelCase__ = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCAmelCase__ = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self :Union[str, Any] , lowerCamelCase :Union[str, Any] , *lowerCamelCase :str , lowerCamelCase :Optional[Any]=None , lowerCamelCase :int=None , **lowerCamelCase :Optional[Any] ) -> str:
return super().__call__(lowerCamelCase , *lowerCamelCase , num_workers=lowerCamelCase , batch_size=lowerCamelCase , **lowerCamelCase )
def UpperCAmelCase_ ( self :Any , lowerCamelCase :str , lowerCamelCase :Optional[Any]=64 , lowerCamelCase :int = 0 , lowerCamelCase :float = 512 / 1500 , lowerCamelCase :Optional[int] = 32 , lowerCamelCase :Optional[int] = 1 , ) -> Any:
UpperCAmelCase__ = load_image(lowerCamelCase )
UpperCAmelCase__ = self.image_processor.size["longest_edge"]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.generate_crop_boxes(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = self.image_processor(images=lowerCamelCase , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase__ = self.get_inference_context()
with inference_context():
UpperCAmelCase__ = self._ensure_tensor_on_device(lowerCamelCase , device=self.device )
UpperCAmelCase__ = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCAmelCase__ = image_embeddings
UpperCAmelCase__ = grid_points.shape[1]
UpperCAmelCase__ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase__ = input_labels[:, i : i + points_per_batch]
UpperCAmelCase__ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase_ ( self :Tuple , lowerCamelCase :List[str] , lowerCamelCase :Union[str, Any]=0.88 , lowerCamelCase :Optional[Any]=0.95 , lowerCamelCase :Tuple=0 , lowerCamelCase :Union[str, Any]=1 , ) -> Dict:
UpperCAmelCase__ = model_inputs.pop("input_boxes" )
UpperCAmelCase__ = model_inputs.pop("is_last" )
UpperCAmelCase__ = model_inputs.pop("original_sizes" ).tolist()
UpperCAmelCase__ = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCAmelCase__ = self.model(**lowerCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase__ = model_outputs["pred_masks"]
UpperCAmelCase__ = self.image_processor.post_process_masks(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , binarize=lowerCamelCase )
UpperCAmelCase__ = model_outputs["iou_scores"]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores" ) )
            all_masks.extend(model_output.pop("masks" ) )
            all_boxes.append(model_output.pop("boxes" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 169 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
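# The guarded imports above follow the standard lazy optional-dependency pattern.
# A stripped-down sketch of the same idea (module names here are illustrative,
# not from this package):
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from ...utils.dummy_pt_objects import *  # stand-ins that raise a helpful error on use
#     else:
#         from .real_module import RealClass       # the actual implementation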
| 293 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args) -> None:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether'''
                    """ to overwrite it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
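# Example launch (hypothetical paths; the flag combination satisfies sanity_checks:
# --mlm with alpha_mlm > 0 and alpha_clm == 0, a distilbert student and bert teacher):
#
#     python train.py \
#         --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#         --teacher_type bert --teacher_name bert-base-uncased \
#         --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#         --data_file data/binarized_text.bert-base-uncased.pickle \
#         --token_counts data/token_counts.bert-base-uncased.pickle \
#         --dump_path serialization_dir/my_first_distillation --force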
| 293 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n    title={Scikit-learn: Machine Learning in {P}ython},\n    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n        and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n        and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n        Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n    journal={Journal of Machine Learning Research},\n    volume={12},\n    pages={2825--2830},\n    year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
    def _info(self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) ,reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] ,)
    def _compute(self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ) -> Optional[Any]:
        """simple docstring"""
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 16 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self , **kwargs ) -> dict:
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
def A__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = self.dummy_sample_deter + 0.1
UpperCamelCase = self.dummy_sample_deter - 0.1
UpperCamelCase = samplea.shape[0]
UpperCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase = torch.arange(_SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase = scheduler.batch_step_no_noise(_SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
UpperCamelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
UpperCamelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
UpperCamelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = pred_prev_sample
UpperCamelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
UpperCamelCase = -1
else:
UpperCamelCase = timesteps[i + 1]
UpperCamelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
UpperCamelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [100, 87, 50, 1, 0]
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
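# --- Hedged sketch (not part of the test suite above): the `_get_variance`
# values asserted earlier match the standard DDPM posterior variance
#   var_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
# assuming a linear beta schedule from 1e-4 to 0.02 over 1000 steps. ---
def _ddpm_variance_sketch(t, num_steps=1000):
    import torch

    betas = torch.linspace(1e-4, 0.02, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_bar_prev) / (1 - alphas_cumprod[t])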
| 321 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Any = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : str = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
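# --- Hedged illustration (an assumed, simplified stand-in; NOT the actual
# transformers `_LazyModule` implementation): the same deferred-import effect
# can be sketched with module-level __getattr__ (PEP 562). ---
import importlib

_LAZY_ATTRS = {"RoFormerConfig": ".configuration_roformer"}  # hypothetical subset

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")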
| 354 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa'
__UpperCamelCase : List[str] = (
'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
__UpperCamelCase : Optional[int] = 'document_qa'
__UpperCamelCase : Optional[int] = AutoProcessor
__UpperCamelCase : Tuple = VisionEncoderDecoderModel
__UpperCamelCase : Any = ['image', 'text']
__UpperCamelCase : Optional[Any] = ['text']
    def __init__(self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )
    def encode(self , document , question ):
        # Build the DocVQA task prompt and tokenize it as the decoder input.
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self , inputs ):
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) ,
            decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=True ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=True ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=True ,
        ).sequences
    def decode(self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(R'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
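# --- Hedged usage sketch (assumptions: the tools runtime and Pillow are
# installed, a local "invoice.png" exists; the class above is obfuscated as
# `a__`, DocumentQuestionAnsweringTool in the original source). ---
if __name__ == "__main__":
    from PIL import Image

    doc_qa = a__()
    print(doc_qa(document=Image.open("invoice.png"), question="What is the total amount?"))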
| 9 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __A ( lowerCamelCase_ ):
"""simple docstring"""
return x + 2
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = """x = 3"""
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
assert result == 3
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3} )
SCREAMING_SNAKE_CASE : Union[str, Any] = """x = y"""
SCREAMING_SNAKE_CASE : List[str] = {"""y""": 5}
SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 5, """y""": 5} )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = """y = add_two(x)"""
SCREAMING_SNAKE_CASE : Optional[int] = {"""x""": 3}
SCREAMING_SNAKE_CASE : str = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ )
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = """x = 3"""
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
assert result == 3
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3} )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = """test_dict = {'x': x, 'y': add_two(x)}"""
SCREAMING_SNAKE_CASE : Any = {"""x""": 3}
SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ )
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} )
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = """x = 3\ny = 5"""
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 5} )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = """text = f'This is x: {x}.'"""
SCREAMING_SNAKE_CASE : Dict = {"""x""": 3}
SCREAMING_SNAKE_CASE : List[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """text""": """This is x: 3."""} )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5"""
SCREAMING_SNAKE_CASE : Any = {"""x""": 3}
SCREAMING_SNAKE_CASE : List[Any] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 2} )
SCREAMING_SNAKE_CASE : Optional[int] = {"""x""": 8}
SCREAMING_SNAKE_CASE : Optional[int] = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 8, """y""": 5} )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = """test_list = [x, add_two(x)]"""
SCREAMING_SNAKE_CASE : List[str] = {"""x""": 3}
SCREAMING_SNAKE_CASE : Any = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [3, 5] )
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_list""": [3, 5]} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = """y = x"""
SCREAMING_SNAKE_CASE : Any = {"""x""": 3}
SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {} , state=lowerCamelCase_ )
assert result == 3
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """y""": 3} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = """test_list = [x, add_two(x)]\ntest_list[1]"""
SCREAMING_SNAKE_CASE : int = {"""x""": 3}
SCREAMING_SNAKE_CASE : Tuple = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ )
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_list""": [3, 5]} )
SCREAMING_SNAKE_CASE : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
SCREAMING_SNAKE_CASE : str = {"""x""": 3}
SCREAMING_SNAKE_CASE : Dict = evaluate(lowerCamelCase_ , {"""add_two""": add_two} , state=lowerCamelCase_ )
assert result == 5
self.assertDictEqual(lowerCamelCase_ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = """x = 0\nfor i in range(3):\n x = i"""
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Tuple = evaluate(lowerCamelCase_ , {"""range""": range} , state=lowerCamelCase_ )
assert result == 2
self.assertDictEqual(lowerCamelCase_ , {"""x""": 2, """i""": 2} )
| 323 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` inverted."""
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit value (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
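# --- Hedged demo of the bit operations above on number = 0b1010 (decimal 10). ---
if __name__ == "__main__":
    number = 0b1010
    assert set_bit(number, 0) == 0b1011    # bit 0 turned on
    assert clear_bit(number, 1) == 0b1000  # bit 1 turned off
    assert flip_bit(number, 2) == 0b1110   # bit 2 inverted
    assert is_bit_set(number, 3) is True   # bit 3 is already 1
    assert get_bit(number, 2) == 0         # bit 2 reads as 0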
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6_0_2_1E-1_9  # units = C
def lowerCAmelCase_ ( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
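# --- Hedged numeric check (assumed approximate copper values: sigma ~ 5.96e7 S/m,
# n ~ 8.5e28 electrons/m^3). Since sigma = n * e * mu, solving for the missing
# mobility should give roughly 4.4e-3 m^2/(V*s). ---
if __name__ == "__main__":
    print(lowerCAmelCase_(conductivity=5.96e7, electron_conc=8.5e28, mobility=0))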
| 106 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( UpperCamelCase__ ):
def __init__(self : Union[str, Any] , __a : VQModel , __a : UNetaDModel , __a : DDIMScheduler ):
super().__init__()
self.register_modules(vqvae=__a , unet=__a , scheduler=__a )
@torch.no_grad()
def __call__(self : str , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : float = 0.0 , __a : int = 50 , __a : Optional[str] = "pil" , __a : bool = True , **__a : Optional[int] , ):
UpperCAmelCase_ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__a , )
UpperCAmelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__a )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase_ = self.scheduler.scale_model_input(__a , __a )
# predict the noise residual
UpperCAmelCase_ = self.unet(__a , __a ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(__a , __a , __a , **__a ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase_ = self.vqvae.decode(__a ).sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
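# --- Hedged usage sketch (assumptions: network access and the public
# "CompVis/ldm-celebahq-256" checkpoint; `__A` is this corpus's name for the
# unconditional latent diffusion pipeline above). ---
if __name__ == "__main__":
    pipeline = __A.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipeline(batch_size=1, num_inference_steps=50).images[0]
    image.save("ldm_sample.png")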
| 106 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( a_ , a_ ):
"""simple docstring"""
lowercase__ = "resnet"
lowercase__ = ["basic", "bottleneck"]
    def __init__( self ,num_channels=3 ,embedding_size=6_4 ,hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] ,depths=[3, 4, 6, 3] ,layer_type="bottleneck" ,hidden_act="relu" ,downsample_in_first_stage=False ,out_features=None ,out_indices=None ,**kwargs ,):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F'stage{idx}' for idx in range(1 ,len(depths ) + 1 )]
        self._out_features ,self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = version.parse("1.11" )
@property
def __lowerCAmelCase ( self : int ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self : Dict ):
return 1E-3
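# --- Hedged sketch (assumption: the installed transformers library exposes the
# equivalent class as `ResNetConfig`). ---
if __name__ == "__main__":
    from transformers import ResNetConfig

    cfg = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic", out_features=["stage4"])
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']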
| 106 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCamelCase : str = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 106 | 1 |
'''simple docstring'''
from math import pow, sqrt
def validate(*values: float) -> bool:
    # Graham's law only applies to strictly positive inputs.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_a: float, molar_mass_b: float):
    # rate_a / rate_b = sqrt(molar_mass_b / molar_mass_a)
    return (
        round(sqrt(molar_mass_b / molar_mass_a) , 6 )
        if validate(molar_mass_a , molar_mass_b )
        else ValueError('Input Error: Molar mass values must greater than 0.' )
    )
def first_effusion_rate(effusion_rate: float, molar_mass_a: float, molar_mass_b: float):
    return (
        round(effusion_rate * sqrt(molar_mass_b / molar_mass_a) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.' )
    )
def second_effusion_rate(effusion_rate: float, molar_mass_a: float, molar_mass_b: float):
    return (
        round(effusion_rate / sqrt(molar_mass_b / molar_mass_a) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.' )
    )
def first_molar_mass(molar_mass: float, effusion_rate_a: float, effusion_rate_b: float):
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.' )
    )
def second_molar_mass(molar_mass: float, effusion_rate_a: float, effusion_rate_b: float):
    return (
        round(pow(effusion_rate_a / effusion_rate_b , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.' )
    )
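# --- Hedged worked example: for hydrogen (M ~ 2.016 g/mol) against oxygen
# (M ~ 32.00 g/mol), Graham's law gives rate_a / rate_b = sqrt(32.00 / 2.016). ---
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.00))  # -> 3.984095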
| 358 |
'''simple docstring'''
def actual_power(a: int, b: int):
    # Exponentiation by squaring; negative b also terminates because int(b / 2)
    # truncates toward zero.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power(a: int, b: int) -> float:
    # A negative exponent inverts the positive-exponent result.
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
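# --- Hedged check of the divide-and-conquer identity used above:
# a^b = a^(b//2) * a^(b//2), with one extra factor of a when b is odd. ---
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(5, 0) == 1
    assert abs(power(2, -3) - 0.125) < 1e-12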
| 280 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__A = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
__A = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__A = dict(zip(vocab, range(len(vocab))))
__A = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
__A = Path(tmpdirname)
__A = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
__A = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
__A = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
__A = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__A = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__A = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__A = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
__A = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 293 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293 | 1 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a : Optional[int] = '''true'''
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str]=82 , _lowercase : str=16 ) ->Optional[Any]:
'''simple docstring'''
set_seed(42 )
a : List[str] = RegressionModel()
a : List[str] = deepcopy(SCREAMING_SNAKE_CASE_ )
a : int = RegressionDataset(length=SCREAMING_SNAKE_CASE_ )
a : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
model.to(accelerator.device )
a, a : Any = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE ( _lowercase : Accelerator , _lowercase : Optional[int]=False ) ->Tuple:
'''simple docstring'''
a : int = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
a : Optional[Any] = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(_lowercase : List[Any] ):
a : str = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
with accelerator.main_process_first():
a : List[str] = dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
a : Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowercase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="longest" , return_tensors="pt" )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=16 )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int ) ->str:
'''simple docstring'''
a : str = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE_ , split_batches=SCREAMING_SNAKE_CASE_ )
a : Optional[Any] = get_dataloader(SCREAMING_SNAKE_CASE_ , not dispatch_batches )
a : int = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
a, a : Tuple = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : List[str] , _lowercase : Optional[int] ) ->Dict:
'''simple docstring'''
a : Any = []
for batch in dataloader:
a, a : Any = batch.values()
with torch.no_grad():
a : List[str] = model(SCREAMING_SNAKE_CASE_ )
a, a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a, a : Optional[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE_ )
targs.append(SCREAMING_SNAKE_CASE_ )
a, a : Dict = torch.cat(SCREAMING_SNAKE_CASE_ ), torch.cat(SCREAMING_SNAKE_CASE_ )
return logits, targs
def _SCREAMING_SNAKE_CASE ( _lowercase : Accelerator , _lowercase : Union[str, Any]=82 , _lowercase : Union[str, Any]=False , _lowercase : Any=False , _lowercase : Optional[int]=16 ) ->Optional[int]:
'''simple docstring'''
a, a, a : Optional[Any] = get_basic_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a, a : Any = generate_predictions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert (
len(SCREAMING_SNAKE_CASE_ ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE_ )}"""
def _SCREAMING_SNAKE_CASE ( _lowercase : bool = False , _lowercase : bool = False ) ->Any:
'''simple docstring'''
a : Union[str, Any] = evaluate.load("glue" , "mrpc" )
a, a : str = get_mrpc_setup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# First do baseline
a, a, a : Any = setup["no"]
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE_ )
with torch.inference_mode():
a : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
a : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=batch["labels"] )
a : Optional[Any] = metric.compute()
# Then do distributed
a, a, a : List[Any] = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
a : Optional[int] = outputs.logits.argmax(dim=-1 )
a : Union[str, Any] = batch["labels"]
a, a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
a : List[str] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]:
'''simple docstring'''
a : Optional[Any] = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : Optional[Any] = Accelerator(split_batches=SCREAMING_SNAKE_CASE_ , dispatch_batches=SCREAMING_SNAKE_CASE_ )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
a : Dict = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE_ , 512 )
accelerator.state._reset_state()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->Union[str, Any]:
'''simple docstring'''
main()
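# --- Hedged illustration of why `gather_for_metrics` is used above: with uneven
# sharding the sampler pads the final batch by repeating samples, and
# `gather_for_metrics` drops those duplicates so metrics see the true dataset
# size (the numbers below are an assumed example, not taken from the tests). ---
def _padding_overhead(dataset_len=82, world_size=4):
    padded = math.ceil(dataset_len / world_size) * world_size
    return padded - dataset_len  # duplicated samples that must be dropped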
if __name__ == "__main__":
main()
| 363 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a : Optional[int] = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Union[str, Any] ="""albert"""
    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class __UpperCamelCase ( a__ ):
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
a : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
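# --- Hedged illustration of the dynamic-axes mapping above for a default
# (non multiple-choice) task. ---
if __name__ == "__main__":
    axes = {0: "batch", 1: "sequence"}
    print(OrderedDict([("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)]))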
| 79 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = '▁'
__lowerCAmelCase = {'vocab_file': 'spiece.model'}
__lowerCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
__lowerCAmelCase = {
'google/pegasus-xsum': 512,
}
__lowerCAmelCase = logging.get_logger(__name__)
class _lowerCAmelCase ( A__ ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['''input_ids''', '''attention_mask''']
def __init__(self , UpperCAmelCase , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<mask_2>" , UpperCAmelCase="<mask_1>" , UpperCAmelCase=None , UpperCAmelCase=103 , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is"""
f""" {type(lowerCAmelCase__ )}""" )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_snake_case = mask_token_sent
_snake_case = vocab_file
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# add special tokens to encoder dict
_snake_case = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
_snake_case = {v: k for k, v in self.encoder.items()}
@property
def lowercase (self ) -> int:
return len(self.sp_model ) + self.offset
def lowercase (self ) -> Dict[str, int]:
_snake_case = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__(self , UpperCAmelCase ) -> Tuple:
_snake_case = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase (self , UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def lowercase (self , UpperCAmelCase ) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
_snake_case = self.sp_model.piece_to_id(lowerCAmelCase__ )
return sp_id + self.offset
def lowercase (self , UpperCAmelCase ) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
_snake_case = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase (self , UpperCAmelCase ) -> List[Any]:
_snake_case = []
_snake_case = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_snake_case = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def lowercase (self , UpperCAmelCase=False ) -> Dict:
return 1
def lowercase (self , UpperCAmelCase ) -> str:
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(lowerCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(lowerCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase (self , UpperCAmelCase , UpperCAmelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , """wb""" ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
        return (out_vocab_file,)
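# --- Hedged usage sketch (assumptions: network access to the public
# "google/pegasus-xsum" checkpoint; the tokenizer class above is the one this
# corpus names `_lowerCAmelCase`). ---
if __name__ == "__main__":
    tok = _lowerCAmelCase.from_pretrained("google/pegasus-xsum")
    ids = tok("PEGASUS was pretrained with gap-sentence generation.").input_ids
    print(tok.convert_ids_to_tokens(ids))
| 341 |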
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
return float((preds == labels).mean() )
def acc_and_fa(preds, labels):
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    '''A single stack element holding data and a link to the node below it.'''
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self):
        return f"{self.data}"
class LinkedStack(Generic[T]):
    '''LIFO stack backed by a singly linked list; `top` points at the newest node.'''
    def __init__(self):
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self):
        return "->".join([str(item) for item in self])
    def __len__(self):
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
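# --- Hedged demo of the linked-list stack above. ---
def _demo_stack():
    stack = LinkedStack()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert len(stack) == 2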
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 12 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass
| 12 | 1
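# A minimal sketch of the denoising loop the tests above exercise: build an
# UnCLIPScheduler with the same config values and step it with a stand-in
# "model" that returns random noise. The constructor/step keywords follow
# current diffusers; treat them as assumptions for other versions.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
sample = torch.randn(1, 3, 32, 32)  # stand-in latent
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # a real pipeline calls its UNet here
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)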
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Any ,*lowercase_ : Dict ,**lowercase_ : Tuple ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' ,lowercase_ ,)
super().__init__(*lowercase_ ,**lowercase_ )
| 106 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : Any = logging.getLogger(__name__)
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
__UpperCamelCase : Any = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
__UpperCamelCase : List[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__UpperCamelCase : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__UpperCamelCase : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
__UpperCamelCase : Dict = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 106 | 1 |
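# Why these counts get dumped: in DistilBERT-style masked-LM training the
# per-token counts are typically turned into a smoothed sampling distribution
# so that rare tokens are masked more often. A minimal standalone sketch; the
# 0.7 exponent is an illustrative hyperparameter, not taken from this script.
import numpy as np

counts = np.array([1_000_000, 5_000, 3, 0])             # occurrences per token id
smoothing = 0.7                                         # assumed smoothing power
probs = np.maximum(counts, 1).astype(float) ** -smoothing
probs /= probs.sum()                                    # normalize to a distribution
print(probs)                                            # rare ids get higher mass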
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_UpperCAmelCase = re.compile(r"""\s+""")
def UpperCamelCase ( __lowercase : Tuple ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(__lowercase ,'' ,example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCamelCase ( __lowercase : Optional[Any] ):
'''simple docstring'''
A_ : str = [len(__lowercase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__lowercase ), "line_max": max(__lowercase )}
def UpperCamelCase ( __lowercase : Any ):
'''simple docstring'''
A_ : Tuple = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : List[str] ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : List[str]=5 ):
'''simple docstring'''
A_ : int = ['auto-generated', 'autogenerated', 'automatically generated']
A_ : Dict = example['content'].splitlines()
for _, line in zip(range(__lowercase ) ,__lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : int=5 ,__lowercase : int=0.05 ):
'''simple docstring'''
A_ : List[Any] = ['unit tests', 'test file', 'configuration file']
A_ : List[Any] = example['content'].splitlines()
A_ : Any = 0
A_ : Optional[Any] = 0
# first test
for _, line in zip(range(__lowercase ) ,__lowercase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : Tuple = example['content'].count('\n' )
A_ : List[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCamelCase ( __lowercase : Tuple ):
'''simple docstring'''
A_ : Optional[int] = ['def ', 'class ', 'for ', 'while ']
A_ : Dict = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : List[str]=4 ):
'''simple docstring'''
A_ : int = example['content'].splitlines()
A_ : str = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = tokenizer(example['content'] ,truncation=__lowercase )['input_ids']
A_ : Optional[int] = len(example['content'] ) / len(__lowercase )
return {"ratio": ratio}
def UpperCamelCase ( __lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : int = {}
results.update(get_hash(__lowercase ) )
results.update(line_stats(__lowercase ) )
results.update(alpha_stats(__lowercase ) )
results.update(char_token_ratio(__lowercase ) )
results.update(is_autogenerated(__lowercase ) )
results.update(is_config_or_test(__lowercase ) )
results.update(has_no_keywords(__lowercase ) )
results.update(has_few_assignments(__lowercase ) )
return results
def UpperCamelCase ( __lowercase : Tuple ,__lowercase : str ,__lowercase : List[Any] ):
'''simple docstring'''
if not check_uniques(__lowercase ,__lowercase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCamelCase ( __lowercase : str ):
'''simple docstring'''
with open(__lowercase ,'rb' ) as f_in:
with gzip.open(str(__lowercase ) + '.gz' ,'wb' ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__lowercase ,__lowercase )
os.unlink(__lowercase )
# Settings
_UpperCAmelCase = HfArgumentParser(PreprocessingArguments)
_UpperCAmelCase = parser.parse_args()
if args.num_workers is None:
_UpperCAmelCase = multiprocessing.cpu_count()
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_UpperCAmelCase = time.time()
_UpperCAmelCase = load_dataset(args.dataset_name, split="""train""")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
_UpperCAmelCase = time.time()
_UpperCAmelCase = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
_UpperCAmelCase = set(ds.unique("""hash"""))
_UpperCAmelCase = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
_UpperCAmelCase = time.time()
_UpperCAmelCase = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_UpperCAmelCase = time.time()
_UpperCAmelCase ,_UpperCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
_UpperCAmelCase = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
_UpperCAmelCase = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
_UpperCAmelCase = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_UpperCAmelCase = str(data_dir / F"""file-{file_number+1:012}.json""")
_UpperCAmelCase = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 360 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 192 | 0 |
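# The _LazyModule used above defers the heavy torch import until an attribute
# is first touched. Plain Python offers the same trick through PEP 562's
# module-level __getattr__; a minimal sketch for a package __init__.py (the
# mapping below is illustrative):
import importlib
from typing import Any

_LAZY = {"AutoformerModel": ".modeling_autoformer"}

def __getattr__(name: str) -> Any:
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")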
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase__: List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( _lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> List[str]:
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , _lowerCAmelCase , )
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
UpperCAmelCase : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
UpperCAmelCase , UpperCAmelCase : int = image[0].size
UpperCAmelCase , UpperCAmelCase : Any = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
UpperCAmelCase : Optional[int] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
UpperCAmelCase : Union[str, Any] = np.concatenate(_lowerCAmelCase , axis=0 )
UpperCAmelCase : str = np.array(_lowerCAmelCase ).astype(np.floataa ) / 2_5_5.0
UpperCAmelCase : Dict = image.transpose(0 , 3 , 1 , 2 )
UpperCAmelCase : Union[str, Any] = 2.0 * image - 1.0
UpperCAmelCase : Any = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
UpperCAmelCase : List[Any] = torch.cat(_lowerCAmelCase , dim=0 )
return image
def snake_case_ ( _lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ) -> Tuple:
if isinstance(_lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
UpperCAmelCase : Optional[Any] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = mask[0].size
UpperCAmelCase , UpperCAmelCase : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase : Any = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
UpperCAmelCase : Union[str, Any] = np.concatenate(_lowerCAmelCase , axis=0 )
UpperCAmelCase : List[Any] = mask.astype(np.floataa ) / 2_5_5.0
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Any = 1
UpperCAmelCase : List[Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
UpperCAmelCase : Dict = torch.cat(_lowerCAmelCase , dim=0 )
return mask
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self : Tuple , __snake_case : Optional[Any] , __snake_case : str ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Union[str, Any] , __snake_case : Union[torch.Tensor, PIL.Image.Image] , __snake_case : Union[torch.Tensor, PIL.Image.Image] , __snake_case : int = 250 , __snake_case : float = 0.0 , __snake_case : int = 10 , __snake_case : int = 10 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase : int = image
UpperCAmelCase : int = _preprocess_image(__snake_case )
UpperCAmelCase : int = original_image.to(device=self.device , dtype=self.unet.dtype )
UpperCAmelCase : Tuple = _preprocess_mask(__snake_case )
UpperCAmelCase : List[Any] = mask_image.to(device=self.device , dtype=self.unet.dtype )
UpperCAmelCase : Dict = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase : Union[str, Any] = original_image.shape
UpperCAmelCase : Optional[Any] = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case , __snake_case , __snake_case , self.device )
UpperCAmelCase : Any = eta
UpperCAmelCase : Optional[int] = self.scheduler.timesteps[0] + 1
UpperCAmelCase : Any = generator[0] if isinstance(__snake_case , __snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
UpperCAmelCase : Optional[int] = self.unet(__snake_case , __snake_case ).sample
# compute previous image: x_t -> x_t-1
UpperCAmelCase : Union[str, Any] = self.scheduler.step(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
UpperCAmelCase : Tuple = self.scheduler.undo_step(__snake_case , __snake_case , __snake_case )
UpperCAmelCase : int = t
UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 23 |
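# The heart of RePaint's loop above: at each step the known region is swapped
# back in as a freshly noised copy of the original, so the model only has to
# fill the masked region. A toy numpy illustration of that compositing (the
# shapes and the 0.1 noise level are illustrative):
import numpy as np

rng = np.random.default_rng(0)
original = rng.normal(size=(8, 8))
mask = np.zeros((8, 8))
mask[2:6, 2:6] = 1.0                                    # 1 marks the known region here
model_sample = rng.normal(size=(8, 8))                  # stand-in for the denoised x_t
noised_original = original + 0.1 * rng.normal(size=(8, 8))
composite = mask * noised_original + (1 - mask) * model_sample
print(composite.shape)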
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( a , a , a , a , ) -> tuple[float | int, list[tuple[int, int]]]:
__A , __A : int = grid.shape
__A : Any = [-1, 1, 0, 0]
__A : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__A , __A : Optional[int] = [(0, source)], set()
__A : Any = np.full((rows, cols) , np.inf )
__A : Any = 0
__A : Any = np.empty((rows, cols) , dtype=a )
__A : Optional[Any] = None
while queue:
((__A) , (__A)) : List[str] = heappop(a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__A : int = []
while (x, y) != source:
path.append((x, y) )
__A , __A : Optional[int] = predecessors[x, y]
path.append(a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(a ) ):
__A , __A : Union[str, Any] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__A : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(a , (dist + 1, (nx, ny)) )
__A : List[Any] = dist + 1
__A : Union[str, Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 0 |
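# A self-contained usage sketch of the grid shortest path above: cells equal
# to 1 are walkable, every move costs 1, so Dijkstra behaves like BFS here.
# Re-implemented compactly so the example runs on its own:
from heapq import heappop, heappush

def grid_dijkstra(grid, source, destination):
    rows, cols = len(grid), len(grid[0])
    dist = {source: 0}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            return d
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[(nx, ny)] = d + 1
                    heappush(queue, (d + 1, (nx, ny)))
    return float("inf")

grid = [[1, 1, 1], [0, 0, 1], [1, 1, 1]]
print(grid_dijkstra(grid, (0, 0), (2, 0)))  # 6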
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( A__ , unittest.TestCase ):
'''simple docstring'''
a_ : str = DebertaTokenizer
a_ : int = True
a_ : Tuple = DebertaTokenizerFast
def lowerCamelCase ( self : Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
lowerCAmelCase_ : Any = dict(zip(a_ , range(len(a_ ) ) ) )
lowerCAmelCase_ : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : int = {"unk_token": "[UNK]"}
lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowerCamelCase ( self : int , **a_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def lowerCamelCase ( self : Dict , a_ : Tuple ):
lowerCAmelCase_ : List[str] = "lower newer"
lowerCAmelCase_ : Optional[int] = "lower newer"
return input_text, output_text
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = "lower newer"
lowerCAmelCase_ : List[str] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : str = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCAmelCase_ : int = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Optional[int] = self.get_tokenizer()
lowerCAmelCase_ : str = tokenizer("Hello" , "World" )
lowerCAmelCase_ : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , a_ )
@slow
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
lowerCAmelCase_ : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
lowerCAmelCase_ : Optional[Any] = tokenizer.encode(
"sequence builders" , add_special_tokens=a_ , add_prefix_space=a_ )
lowerCAmelCase_ : Optional[Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=a_ , add_prefix_space=a_ )
lowerCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(a_ )
lowerCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase_ : Optional[int] = tokenizer_class.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase_ : Optional[int] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
lowerCAmelCase_ : int = tokenizer(a_ , padding=a_ )
lowerCAmelCase_ : Optional[int] = [tokenizer.decode(a_ , skip_special_tokens=a_ ) for seq in encoding["input_ids"]]
# fmt: off
lowerCAmelCase_ : str = {
"input_ids": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase_ : Union[str, Any] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , a_ )
for expected, decoded in zip(a_ , a_ ):
self.assertEqual(a_ , a_ )
| 161 |
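# How the toy merges table above drives BPE tokenization: split a word into
# characters, then repeatedly fuse the adjacent pair with the best (lowest)
# merge rank. A minimal standalone sketch that merges one occurrence at a
# time (real GPT-2-style BPE merges all occurrences of the chosen pair):
merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(word: str) -> list:
    symbols = list(word)
    while True:
        pairs = [(ranks.get(p, float("inf")), i)
                 for i, p in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            return symbols
        symbols[i:i + 2] = [symbols[i] + symbols[i + 1]]

print(bpe("\u0120lower"))  # ['\u0120low', 'er']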
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
lowercase__ = int(input("""Enter number: """).strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 161 | 1 |
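# The divisor sum above is O(n); divisors pair up around sqrt(n), so the same
# test runs in O(sqrt n). A standalone sketch:
def is_perfect(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 divides every number > 1
    i = 2
    while i * i <= number:
        if number % i == 0:
            total += i
            if i != number // i:
                total += number // i
        i += 1
    return total == number

print([n for n in range(2, 10_000) if is_perfect(n)])  # [6, 28, 496, 8128]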
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase_ = datasets.logging.get_logger(__name__)
lowerCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowerCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowerCamelCase_ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
if self.config_name == "default":
_A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
_A = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ):
'''simple docstring'''
if gpus is None:
_A = 1 if torch.cuda.is_available() else 0
_A = {"src": sources, "mt": predictions, "ref": references}
_A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )]
_A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase )
return {"mean_score": mean_score, "scores": scores}
| 79 | 0 |
'''simple docstring'''
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_UpperCAmelCase : Optional[int] = str(bin(lowerCAmelCase_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Tuple = str(bin(lowerCAmelCase_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : Optional[int] = max(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
return "0b" + "".join(
str(int(char_a == """1""" and char_b == """1""" ) )
for char_a, char_b in zip(a_binary.zfill(lowerCAmelCase_ ) , b_binary.zfill(lowerCAmelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170 |
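# The character-level construction above should agree with Python's built-in
# bitwise AND; a standalone cross-check of that claim:
def binary_and(a: int, b: int) -> str:
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int(x == "1" and y == "1"))
        for x, y in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

for a in range(64):
    for b in range(64):
        assert int(binary_and(a, b), 2) == a & b
print(binary_and(25, 32))  # 0b000000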
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __A ( lowerCAmelCase_ ):
return EnvironmentCommand()
def __A ( lowerCAmelCase_ ):
return EnvironmentCommand(args.accelerate_config_file )
class __lowerCAmelCase ( __a ):
@staticmethod
def snake_case_ (lowerCAmelCase__ ):
_UpperCAmelCase : Tuple = parser.add_parser("""env""" )
download_parser.set_defaults(func=lowerCAmelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=lowerCAmelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=lowerCAmelCase__ )
def __init__(self , lowerCAmelCase__ , *lowerCAmelCase__ ):
_UpperCAmelCase : str = accelerate_config_file
def snake_case_ (self ):
_UpperCAmelCase : Dict = """not installed"""
if is_safetensors_available():
import safetensors
_UpperCAmelCase : Any = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
_UpperCAmelCase : Optional[Any] = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
_UpperCAmelCase : str = """not installed"""
_UpperCAmelCase : List[Any] = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_UpperCAmelCase : List[str] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(lowerCAmelCase__ ):
_UpperCAmelCase : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
_UpperCAmelCase : Optional[Any] = (
"""\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else F"\t{accelerate_config}"
)
_UpperCAmelCase : Dict = """not installed"""
_UpperCAmelCase : int = """NA"""
if is_torch_available():
import torch
_UpperCAmelCase : int = torch.__version__
_UpperCAmelCase : Optional[Any] = torch.cuda.is_available()
_UpperCAmelCase : Optional[Any] = """not installed"""
_UpperCAmelCase : Tuple = """NA"""
if is_tf_available():
import tensorflow as tf
_UpperCAmelCase : Dict = tf.__version__
try:
# deprecated in v2.1
_UpperCAmelCase : List[str] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_UpperCAmelCase : Any = bool(tf.config.list_physical_devices("""GPU""" ) )
_UpperCAmelCase : Dict = """not installed"""
_UpperCAmelCase : Optional[Any] = """not installed"""
_UpperCAmelCase : Dict = """not installed"""
_UpperCAmelCase : Tuple = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
_UpperCAmelCase : str = flax.__version__
_UpperCAmelCase : Optional[Any] = jax.__version__
_UpperCAmelCase : Optional[int] = jaxlib.__version__
_UpperCAmelCase : Tuple = jax.lib.xla_bridge.get_backend().platform
_UpperCAmelCase : str = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F"{safetensors_version}",
"""Accelerate version""": F"{accelerate_version}",
"""Accelerate config""": F"{accelerate_config_str}",
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})",
"""Jax version""": F"{jax_version}",
"""JaxLib version""": F"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(lowerCAmelCase__ ) )
return info
@staticmethod
def snake_case_ (lowerCAmelCase__ ):
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 170 | 1 |
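# The is_*_available checks used above ultimately probe importability without
# paying for the import; importlib.util.find_spec does exactly that. Minimal
# standalone sketch:
import importlib.util

def is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

for pkg in ("torch", "tensorflow", "flax", "safetensors"):
    print(f"{pkg}: {'installed' if is_available(pkg) else 'not installed'}")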
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar('T')
class lowerCamelCase__( Generic[T]):
def __init__( self: List[str] , UpperCamelCase_: T ):
__lowerCamelCase = data
__lowerCamelCase = None
def __str__( self: Optional[Any] ):
return F'{self.data}'
class lowerCamelCase__( Generic[T]):
def __init__( self: List[str] ):
__lowerCamelCase = None
def __iter__( self: Tuple ):
__lowerCamelCase = self.top
while node:
yield node.data
__lowerCamelCase = node.next
def __str__( self: Any ):
return "->".join([str(UpperCamelCase_ ) for item in self] )
def __len__( self: List[Any] ):
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
return self.top is None
def lowerCAmelCase__ ( self: str , UpperCamelCase_: T ):
__lowerCamelCase = Node(UpperCamelCase_ )
if not self.is_empty():
__lowerCamelCase = self.top
__lowerCamelCase = node
def lowerCAmelCase__ ( self: int ):
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , UpperCamelCase_ )
__lowerCamelCase = self.top
__lowerCamelCase = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self: str ):
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 12 |
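# A quick standalone usage sketch of the same stack interface, backed by a
# plain list for comparison with the linked-node version above:
class MiniStack:
    def __init__(self) -> None:
        self._items = []

    def push(self, item) -> None:
        self._items.append(item)

    def pop(self):
        if not self._items:
            raise IndexError("pop from empty stack")
        return self._items.pop()

    def peek(self):
        if not self._items:
            raise IndexError("peek from empty stack")
        return self._items[-1]

s = MiniStack()
for value in (1, 2, 3):
    s.push(value)
assert s.peek() == 3
assert s.pop() == 3 and s.pop() == 2 and s.pop() == 1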
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '''
Examples:
```py
>>> import torch
>>> import numpy as np

>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image


>>> def make_hint(image, depth_estimator):
...     image = depth_estimator(image)["depth"]
...     image = np.array(image)
...     image = image[:, :, None]
...     image = np.concatenate([image, image, image], axis=2)
...     detected_map = torch.from_numpy(image).float() / 255.0
...     hint = detected_map.permute(2, 0, 1)
...     return hint


>>> depth_estimator = pipeline("depth-estimation")

>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")

>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")


>>> img = load_image(
...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
...     "/kandinsky/cat.png"
... ).resize((768, 768))

>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

>>> generator = torch.Generator(device="cuda").manual_seed(43)

>>> image_emb, zero_image_emb = pipe_prior(
...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()

>>> images = pipe(
...     image_embeds=image_emb,
...     negative_image_embeds=zero_image_emb,
...     hint=hint,
...     num_inference_steps=50,
...     generator=generator,
...     height=768,
...     width=768,
... ).images

>>> images[0].save("robot_cat.png")
```
'''
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Optional[int]=8 ):
'''simple docstring'''
__lowerCamelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCamelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 1 |
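# What downscale_height_and_width above computes for the defaults: the
# requested pixel size is ceil-divided by scale_factor**2 and multiplied back
# by scale_factor, giving the latent grid the UNet runs at. Standalone
# restatement with the movq scale factor of 8:
import math

def snapped_dims(height: int, width: int, scale: int = 8):
    return (math.ceil(height / scale**2) * scale,
            math.ceil(width / scale**2) * scale)

print(snapped_dims(768, 768))  # (96, 96)
print(snapped_dims(770, 770))  # (104, 104): non-multiples round up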
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 352 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : int , *a__ : List[Any] , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(a__ )
def __call__(self : Optional[Any] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : List[str] ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
def a (self : int , **a__ : int ):
"""simple docstring"""
return {}, {}, {}
def a (self : Optional[int] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = load_image(a__ )
__snake_case = image.size
__snake_case = self.image_processor(images=a__ , return_tensors=self.framework )
return model_inputs
def a (self : List[Any] , a__ : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model(**a__ )
return model_outputs
def a (self : int , a__ : str ):
"""simple docstring"""
__snake_case = model_outputs.predicted_depth
__snake_case = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ )
__snake_case = prediction.squeeze().cpu().numpy()
__snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' )
__snake_case = Image.fromarray(a__ )
__snake_case = {}
__snake_case = predicted_depth
__snake_case = depth
return output_dict
| 238 | 0 |
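# A hedged usage sketch of the pipeline defined above through the public
# transformers entry point; the image URL is a placeholder and the default
# checkpoint is whatever transformers currently maps to this task:
from transformers import pipeline

depth_estimator = pipeline("depth-estimation")
result = depth_estimator("https://example.com/cat.png")  # hypothetical URL
result["depth"].save("depth.png")        # 8-bit PIL depth map, as built above
print(result["predicted_depth"].shape)   # raw torch tensor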
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case : Union[str, Any] = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=8 ):
"""simple docstring"""
a :List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
a :int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _snake_case ( _snake_case ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
super().__init__()
self.register_modules(
unet=_lowerCamelCase , scheduler=_lowerCamelCase , movq=_lowerCamelCase , )
a :Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if latents is None:
a :str = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
a :Any = latents.to(_lowerCamelCase )
a :Dict = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
a :int = torch.device(F'''cuda:{gpu_id}''' )
a :int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
a :Any = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_lowerCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a :Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
a , a :List[str] = cpu_offload_with_hook(_lowerCamelCase , _lowerCamelCase , prev_module_hook=_lowerCamelCase )
# We'll offload the last model manually.
a :str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE__ ( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCamelCase )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 100 , _lowerCamelCase = 4.0 , _lowerCamelCase = 1 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , ):
a :int = self._execution_device
a :Optional[Any] = guidance_scale > 1.0
if isinstance(_lowerCamelCase , _lowerCamelCase ):
a :Union[str, Any] = torch.cat(_lowerCamelCase , dim=0 )
a :Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowerCamelCase , _lowerCamelCase ):
a :List[str] = torch.cat(_lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
a :Union[str, Any] = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
a :Optional[int] = negative_image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
a :Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCamelCase )
self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase )
a :Optional[Any] = self.scheduler.timesteps
a :List[str] = self.unet.config.in_channels
a , a :str = downscale_height_and_width(_lowerCamelCase , _lowerCamelCase , self.movq_scale_factor )
# create initial latent
a :int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
a :Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a :Union[str, Any] = {'''image_embeds''': image_embeds}
a :Optional[Any] = self.unet(
sample=_lowerCamelCase , timestep=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , added_cond_kwargs=_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
if do_classifier_free_guidance:
a , a :Any = noise_pred.split(latents.shape[1] , dim=1 )
a , a :List[str] = noise_pred.chunk(2 )
a , a :int = variance_pred.chunk(2 )
a :List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a :Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a , a :Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a :int = self.scheduler.step(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase , )[0]
# post-processing
a :int = self.movq.decode(_lowerCamelCase , force_not_quantize=_lowerCamelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
a :str = image * 0.5 + 0.5
a :List[Any] = image.clamp(0 , 1 )
a :str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a :str = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase )
| 94 |
from math import factorial
def UpperCamelCase (lowercase_: int = 20 ) -> int:
A__ : Union[str, Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
A__ : Tuple = n // 2
return int(factorial(lowercase_ ) / (factorial(lowercase_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 192 | 0 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__UpperCamelCase =Vector()
def UpperCAmelCase_ ( self : List[str] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase__ ) , '''(0,0,0,0,0,1)''' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase__ ) , 4 )
def UpperCAmelCase_ ( self : Optional[int] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2] )
__UpperCamelCase =Vector([1, 2, 3, 4, 5] )
__UpperCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__UpperCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )
def UpperCAmelCase_ ( self : str ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCAmelCase_ ( self : int ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCAmelCase_ ( self : Tuple ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([2, -1, 4] ) # for test of dot product
__UpperCamelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def UpperCAmelCase_ ( self : str ) -> None:
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 2, 3] )
__UpperCamelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase__ , UpperCamelCase__ ) ) , '''(3,4,7)''' )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 0, 0, 0, 0, 0] )
__UpperCamelCase =x.copy()
self.assertEqual(str(UpperCamelCase__ ) , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase__ ) , '''(0,1,0)''' )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase__ , UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase__ , UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCAmelCase_ ( self : int ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__UpperCamelCase =Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def UpperCAmelCase_ ( self : Optional[int] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
__UpperCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__UpperCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def UpperCAmelCase_ ( self : Dict ) -> None:
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 365 | """simple docstring"""
import os
from pathlib import Path
def lowerCAmelCase ():
"""simple docstring"""
from torch.utils.cpp_extension import load
    __UpperCamelCase =Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
__UpperCamelCase =[
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , __UpperCamelCase , with_cuda=__UpperCamelCase , extra_include_paths=[str(__UpperCamelCase )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
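# Usage sketch (the loader above is named ``lowerCAmelCase`` here; upstream
# transformers calls it ``load_cuda_kernels``. Compiling requires a working CUDA
# toolchain, since the extension is built on first use):
#
#     MSDA = load_cuda_kernels()
#     # MSDA then exposes the compiled multi-scale deformable attention ops.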
| 85 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a__ : Any = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Tuple , *_A :int , **_A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowercase_ ( self :List[Any] , _A :Tuple=None , _A :Dict=None , _A :List[Any]=None ) -> int:
'''simple docstring'''
__A = {}
__A = {}
if prompt is not None:
__A = prompt
if generate_kwargs is not None:
__A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self :Union[str, Any] , _A :Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A :Optional[Any] ) -> Any:
'''simple docstring'''
return super().__call__(_A , **_A )
def lowercase_ ( self :Optional[Any] , _A :Optional[Any] , _A :Dict=None ) -> List[Any]:
'''simple docstring'''
__A = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__A = self.model.config.model_type
if model_type == "git":
__A = self.image_processor(images=_A , return_tensors=self.framework )
__A = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__A = [self.tokenizer.cls_token_id] + input_ids
__A = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__A = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__A = self.image_processor(images=_A , return_tensors=self.framework )
__A = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__A = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__A = None
return model_inputs
def lowercase_ ( self :int , _A :Dict , _A :Any=None ) -> str:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__A = None
if generate_kwargs is None:
__A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__A = model_inputs.pop(self.model.main_input_name )
__A = self.model.generate(_A , **_A , **_A )
return model_outputs
def lowercase_ ( self :Union[str, Any] , _A :Dict ) -> Tuple:
'''simple docstring'''
__A = []
for output_ids in model_outputs:
__A = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
return records
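# Usage sketch (assumes ``transformers`` with an image-captioning checkpoint such
# as "nlpconnect/vit-gpt2-image-captioning"; not part of the original file):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     print(captioner("photo.jpg", max_new_tokens=20))
#     # -> [{'generated_text': '...'}]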
| 161 |
'''simple docstring'''
from pathlib import Path
import cv2 as cva  # OpenCV; the rest of this snippet refers to it as ``cva``
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img , pts_src , pts_dst , rows , cols )-> np.ndarray:
    """Warp ``img`` with the affine map taking ``pts_src`` onto ``pts_dst``."""
    rotation_matrix = cva.getAffineTransform(pts_src , pts_dst )
    return cva.warpAffine(img , rotation_matrix , (rows, cols) )
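# Note (standard OpenCV behaviour, added for clarity): a 2-D affine transform has
# six degrees of freedom, so exactly three point correspondences determine it,
# which is why getAffineTransform takes three source and three destination points.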
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts_a = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts_b = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts_c = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts_d = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # (the source/destination pairings were lost in extraction; the pairs below
    # are an illustrative reconstruction)
    images = [
        gray_img,
        get_rotation(gray_img, pts_a, pts_b, img_rows, img_cols),
        get_rotation(gray_img, pts_b, pts_c, img_rows, img_cols),
        get_rotation(gray_img, pts_c, pts_d, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 161 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class a_ ( __UpperCamelCase ):
'''simple docstring'''
__a: Optional[Any] = """big_bird"""
def __init__( self , lowercase_=5_0_3_5_8 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4_0_9_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=6_6 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=6_4 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> List[str]:
'''simple docstring'''
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = rescale_embeddings
lowerCAmelCase_ = attention_type
lowerCAmelCase_ = use_bias
lowerCAmelCase_ = block_size
lowerCAmelCase_ = num_random_blocks
lowerCAmelCase_ = classifier_dropout
class a_ ( __UpperCamelCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
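# Minimal sketch of standard transformers usage (not part of the original file):
#
#     from transformers import BigBirdConfig, BigBirdModel
#     configuration = BigBirdConfig(attention_type="block_sparse", block_size=64)
#     model = BigBirdModel(configuration)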
| 355 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None ) -> int:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def _long_tensor( a_ ) -> Any:
    return tf.constant(a_ , dtype=tf.int32 )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 14 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ (A__ ):
"""simple docstring"""
def __init__( self , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
a__ : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__lowercase , scheduler=__lowercase )
@torch.no_grad()
def __call__( self , __lowercase = 1 , __lowercase = None , __lowercase = 0.0 , __lowercase = 5_0 , __lowercase = None , __lowercase = "pil" , __lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , __lowercase ):
a__ : int = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
a__ : Any = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__lowercase )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
a__ : str = randn_tensor(__lowercase , generator=__lowercase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
a__ : int = self.unet(__lowercase , __lowercase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__ : List[str] = self.scheduler.step(
__lowercase , __lowercase , __lowercase , eta=__lowercase , use_clipped_model_output=__lowercase , generator=__lowercase ).prev_sample
a__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
a__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a__ : List[str] = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
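# Usage sketch (assumes ``diffusers`` and a DDPM-trained checkpoint such as
# "google/ddpm-cifar10-32"; not part of the original file):
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]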
| 170 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] =logging.get_logger(__name__)
def lowerCAmelCase_ ( _lowercase : List[Any]) -> Optional[int]:
"""simple docstring"""
a__ : int = DPTConfig(embedding_type="""hybrid""")
if "large" in checkpoint_url:
a__ : Tuple = 1024
a__ : int = 4096
a__ : str = 24
a__ : List[str] = 16
a__ : Optional[Any] = [5, 11, 17, 23]
a__ : Union[str, Any] = [256, 512, 1024, 1024]
a__ : str = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
a__ : Dict = 768
a__ : Dict = [1, 1, 1, 0.5]
a__ : Dict = [256, 512, 768, 768]
a__ : Union[str, Any] = 150
a__ : List[Any] = 16
a__ : List[Any] = (1, 384, 384)
a__ : Optional[Any] = False
a__ : Tuple = """project"""
if "ade" in checkpoint_url:
a__ : int = True
a__ : Any = 768
a__ : Tuple = [1, 1, 1, 0.5]
a__ : str = 150
a__ : Optional[int] = 16
a__ : Optional[Any] = """huggingface/label-files"""
a__ : Any = """ade20k-id2label.json"""
a__ : List[Any] = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type="""dataset""")) , """r"""))
a__ : Union[str, Any] = {int(_lowercase): v for k, v in idalabel.items()}
a__ : List[Any] = idalabel
a__ : List[Any] = {v: k for k, v in idalabel.items()}
a__ : List[str] = [1, 150, 480, 480]
return config, expected_shape
def lowerCAmelCase_ ( _lowercase : Optional[int]) -> List[str]:
"""simple docstring"""
a__ : List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase)
def lowerCAmelCase_ ( _lowercase : Dict) -> Optional[int]:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a__ : int = name.replace("""pretrained.model""" , """dpt.encoder""")
if "pretrained.model" in name:
a__ : Optional[Any] = name.replace("""pretrained.model""" , """dpt.embeddings""")
if "patch_embed" in name:
a__ : Any = name.replace("""patch_embed""" , """""")
if "pos_embed" in name:
a__ : Optional[Any] = name.replace("""pos_embed""" , """position_embeddings""")
if "attn.proj" in name:
a__ : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""")
if "proj" in name and "project" not in name:
a__ : List[Any] = name.replace("""proj""" , """projection""")
if "blocks" in name:
a__ : int = name.replace("""blocks""" , """layer""")
if "mlp.fc1" in name:
a__ : Optional[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""")
if "mlp.fc2" in name:
a__ : Tuple = name.replace("""mlp.fc2""" , """output.dense""")
if "norm1" in name and "backbone" not in name:
a__ : List[str] = name.replace("""norm1""" , """layernorm_before""")
if "norm2" in name and "backbone" not in name:
a__ : List[str] = name.replace("""norm2""" , """layernorm_after""")
if "scratch.output_conv" in name:
a__ : int = name.replace("""scratch.output_conv""" , """head""")
if "scratch" in name:
a__ : List[Any] = name.replace("""scratch""" , """neck""")
if "layer1_rn" in name:
a__ : Optional[Any] = name.replace("""layer1_rn""" , """convs.0""")
if "layer2_rn" in name:
a__ : List[Any] = name.replace("""layer2_rn""" , """convs.1""")
if "layer3_rn" in name:
a__ : Dict = name.replace("""layer3_rn""" , """convs.2""")
if "layer4_rn" in name:
a__ : Optional[int] = name.replace("""layer4_rn""" , """convs.3""")
if "refinenet" in name:
a__ : int = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a__ : int = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
a__ : Optional[Any] = name.replace("""out_conv""" , """projection""")
if "resConfUnit1" in name:
a__ : int = name.replace("""resConfUnit1""" , """residual_layer1""")
if "resConfUnit2" in name:
a__ : Union[str, Any] = name.replace("""resConfUnit2""" , """residual_layer2""")
if "conv1" in name:
a__ : Dict = name.replace("""conv1""" , """convolution1""")
if "conv2" in name:
a__ : Any = name.replace("""conv2""" , """convolution2""")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a__ : List[str] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""")
if "pretrained.act_postprocess2.0.project.0" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""")
if "pretrained.act_postprocess3.0.project.0" in name:
a__ : Any = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""")
if "pretrained.act_postprocess4.0.project.0" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a__ : int = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""")
if "pretrained.act_postprocess1.4" in name:
a__ : Optional[int] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""")
if "pretrained.act_postprocess2.3" in name:
a__ : List[Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""")
if "pretrained.act_postprocess2.4" in name:
a__ : Dict = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""")
if "pretrained.act_postprocess3.3" in name:
a__ : Union[str, Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""")
if "pretrained.act_postprocess4.3" in name:
a__ : int = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""")
if "pretrained.act_postprocess4.4" in name:
a__ : Optional[Any] = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""")
if "pretrained" in name:
a__ : List[str] = name.replace("""pretrained""" , """dpt""")
if "bn" in name:
a__ : int = name.replace("""bn""" , """batch_norm""")
if "head" in name:
a__ : Optional[Any] = name.replace("""head""" , """head.head""")
if "encoder.norm" in name:
a__ : Optional[int] = name.replace("""encoder.norm""" , """layernorm""")
if "auxlayer" in name:
a__ : Optional[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""")
if "backbone" in name:
a__ : int = name.replace("""backbone""" , """backbone.bit.encoder""")
if ".." in name:
a__ : str = name.replace("""..""" , """.""")
if "stem.conv" in name:
a__ : Optional[int] = name.replace("""stem.conv""" , """bit.embedder.convolution""")
if "blocks" in name:
a__ : Optional[int] = name.replace("""blocks""" , """layers""")
if "convolution" in name and "backbone" in name:
a__ : Dict = name.replace("""convolution""" , """conv""")
if "layer" in name and "backbone" in name:
a__ : Tuple = name.replace("""layer""" , """layers""")
if "backbone.bit.encoder.bit" in name:
a__ : Optional[Any] = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""")
if "embedder.conv" in name:
a__ : int = name.replace("""embedder.conv""" , """embedder.convolution""")
if "backbone.bit.encoder.stem.norm" in name:
a__ : Union[str, Any] = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""")
return name
def lowerCAmelCase_ ( _lowercase : Optional[int] , _lowercase : Union[str, Any]) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ : Any = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''')
a__ : int = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
a__ : Any = in_proj_weight[: config.hidden_size, :]
a__ : Dict = in_proj_bias[: config.hidden_size]
a__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
a__ : Optional[Any] = in_proj_bias[-config.hidden_size :]
def prepare_img( ) -> Tuple:
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : Optional[Any]) -> int:
"""simple docstring"""
a__ , a__ : int = get_dpt_config(_lowercase)
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
a__ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""")
# remove certain keys
remove_ignore_keys_(_lowercase)
# rename keys
for key in state_dict.copy().keys():
a__ : int = state_dict.pop(_lowercase)
a__ : str = val
# read in qkv matrices
read_in_q_k_v(_lowercase , _lowercase)
# load HuggingFace model
a__ : List[Any] = DPTForSemanticSegmentation(_lowercase) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowercase)
model.load_state_dict(_lowercase)
model.eval()
# Check outputs on an image
a__ : List[Any] = 480 if """ade""" in checkpoint_url else 384
a__ : str = DPTImageProcessor(size=_lowercase)
a__ : Tuple = prepare_img()
a__ : List[str] = image_processor(_lowercase , return_tensors="""pt""")
# forward pass
a__ : Any = model(**_lowercase).logits if """ade""" in checkpoint_url else model(**_lowercase).predicted_depth
if show_prediction:
a__ : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=_lowercase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255).show()
if pytorch_dump_folder_path is not None:
Path(_lowercase).mkdir(exist_ok=_lowercase)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowercase)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowercase)
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""")
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""")
if __name__ == "__main__":
_lowercase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_lowercase : str =parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
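# Invocation sketch (script name is illustrative; the URL is this file's default):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large --show_prediction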
| 170 | 1 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( name , val , spaces=0 ) -> None:
    '''simple docstring'''
    if name is None:
        msg = None
    else:
        fmt = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ''':''' , val.size() )
    else:
        print(msg , ''':''' , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ) -> torch.Tensor:
    '''simple docstring'''
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
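# Shape sketch (illustrative numbers, assuming num_heads=16, hidden_size=64 and
# num_splits=3 for fused q/k/v): a checkpoint-version-1.0 weight stored as
# [16 * 64 * 3, D] is viewed as (16, 64, 3, D), transposed twice so the q/k/v
# split leads, and flattened back to [3 * 16 * 64, D], the layout that newer
# Megatron-LM versions and the GPT-2 conversion below expect.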
def convert_megatron_checkpoint( args , input_state_dict , config ) -> Optional[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = {}
# old versions did not store training args
snake_case : Dict = input_state_dict.get('''args''' , A__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case : Optional[int] = ds_args.padded_vocab_size
snake_case : Dict = ds_args.max_position_embeddings
snake_case : List[Any] = ds_args.hidden_size
snake_case : Optional[Any] = ds_args.num_layers
snake_case : List[Any] = ds_args.num_attention_heads
snake_case : int = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case : List[Any] = config.n_head
# The hidden_size per head.
snake_case : List[str] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case : Optional[int] = input_state_dict['''checkpoint_version''']
else:
snake_case : Optional[Any] = 0.0
# The model.
snake_case : Dict = input_state_dict['''model''']
# The language model.
snake_case : int = model['''language_model''']
# The embeddings.
snake_case : List[Any] = lm['''embedding''']
# The word embeddings.
snake_case : Optional[Any] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
snake_case : List[str] = word_embeddings[: config.vocab_size, :]
snake_case : str = word_embeddings
# The position embeddings.
snake_case : Any = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case : Optional[Any] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
snake_case : Dict = pos_embeddings
# The transformer.
snake_case : Union[str, Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
snake_case : Dict = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
snake_case : int = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case : Union[str, Any] = layer_re.match(A__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case : int = int(m.group(1 ) )
# The name of the operation.
snake_case : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
snake_case : Optional[Any] = m.group(3 )
# The name of the layer.
snake_case : str = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
snake_case : int = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
snake_case : str = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , A__ , A__ )
snake_case : Optional[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case : Dict = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case : Tuple = masked_bias
snake_case : int = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case : Tuple = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case : Tuple = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Store. No change of shape.
snake_case : Optional[int] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case : Tuple = megatron_to_transformers[op_name]
snake_case : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case : Optional[Any] = megatron_to_transformers[op_name]
snake_case : List[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case : Any = transformer['''final_layernorm.weight''']
snake_case : List[Any] = transformer['''final_layernorm.bias''']
# For LM head, transformers' wants the matrix to weight embeddings.
snake_case : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def main( ) -> Optional[int]:
'''simple docstring'''
snake_case : Dict = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=A__ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=A__ , help='''An optional config json file describing the pre-trained model.''' , )
snake_case : Optional[Any] = parser.parse_args()
# Extract the basename.
snake_case : Tuple = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
snake_case : Optional[int] = torch.load(A__ , map_location='''cpu''' )
else:
snake_case : str = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
snake_case : str = input_state_dict.get('''args''' , A__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case : Dict = '''gelu_fast'''
elif ds_args.openai_gelu:
snake_case : Tuple = '''gelu_new'''
else:
snake_case : Any = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
snake_case : int = '''gelu_new'''
# Spell out all parameters in case the defaults change.
snake_case : Dict = GPTaConfig(
        snake_case : Dict = GPTaConfig(
            vocab_size=5_0257 ,
            n_positions=1024 ,
            n_embd=1024 ,
            n_layer=24 ,
            n_head=16 ,
            n_inner=4096 ,
            activation_function=A__ ,
            resid_pdrop=0.1 ,
            embd_pdrop=0.1 ,
            attn_pdrop=0.1 ,
            layer_norm_epsilon=1E-5 ,
            initializer_range=0.02 ,
            summary_type='''cls_index''' ,
            summary_use_proj=A__ ,
            summary_activation=A__ ,
            summary_proj_to_labels=A__ ,
            summary_first_dropout=0.1 ,
            scale_attn_weights=A__ ,
            use_cache=A__ ,
            bos_token_id=5_0256 ,
            eos_token_id=5_0256 ,
        )
else:
snake_case : Optional[Any] = GPTaConfig.from_json_file(args.config_file )
snake_case : List[Any] = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
snake_case : str = convert_megatron_checkpoint(A__ , A__ , A__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(A__ , A__ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case : Any = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case : Tuple = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
snake_case : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
snake_case : Optional[int] = '''gpt2'''
snake_case : Optional[int] = AutoTokenizer.from_pretrained(A__ )
snake_case : Any = type(A__ ).__name__
snake_case : Optional[Any] = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(A__ )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(A__ )
# Store the state_dict to file.
snake_case : Optional[int] = os.path.join(A__ , '''pytorch_model.bin''' )
print(F'Saving checkpoint to \"{output_checkpoint_file}\"' )
torch.save(A__ , A__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
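# Invocation sketch (script name and checkpoint path are illustrative; the inner
# zip layout matches the ``release/mp_rank_00/model_optim_rng.pt`` path above):
#
#     python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#         /path/to/release/mp_rank_00/model_optim_rng.pt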
| 358 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = """efficientnet"""
def __init__( self : str , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 600 , UpperCamelCase__ : float = 2.0 , UpperCamelCase__ : float = 3.1 , UpperCamelCase__ : int = 8 , UpperCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCamelCase__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCamelCase__ : List[int] = [] , UpperCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase__ : float = 0.25 , UpperCamelCase__ : str = "swish" , UpperCamelCase__ : int = 2560 , UpperCamelCase__ : str = "mean" , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : float = 0.001 , UpperCamelCase__ : float = 0.99 , UpperCamelCase__ : float = 0.5 , UpperCamelCase__ : float = 0.2 , **UpperCamelCase__ : Any , ) -> Any:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
snake_case : Dict = num_channels
snake_case : List[Any] = image_size
snake_case : Any = width_coefficient
snake_case : int = depth_coefficient
snake_case : List[str] = depth_divisor
snake_case : Tuple = kernel_sizes
snake_case : Optional[Any] = in_channels
snake_case : Optional[Any] = out_channels
snake_case : Dict = depthwise_padding
snake_case : Optional[Any] = strides
snake_case : List[str] = num_block_repeats
snake_case : Any = expand_ratios
snake_case : Any = squeeze_expansion_ratio
snake_case : Optional[Any] = hidden_act
snake_case : Optional[int] = hidden_dim
snake_case : Dict = pooling_type
snake_case : Any = initializer_range
snake_case : Optional[Any] = batch_norm_eps
snake_case : Tuple = batch_norm_momentum
snake_case : Any = dropout_rate
snake_case : str = drop_connect_rate
snake_case : Dict = sum(UpperCamelCase__ ) * 4
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = version.parse("""1.11""" )
@property
def lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase ( self : Tuple ) -> float:
"""simple docstring"""
return 1e-5
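# Minimal sketch of standard transformers usage (not part of the original file):
#
#     from transformers import EfficientNetConfig, EfficientNetModel
#     model = EfficientNetModel(EfficientNetConfig(image_size=600, hidden_dim=2560))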
| 83 | 0 |